Column           Type            Lengths / values
query            stringlengths   9 to 3.4k
document         stringlengths   9 to 87.4k
metadata         dict
negatives        listlengths     4 to 101
negative_scores  listlengths     4 to 101
document_score   stringlengths   3 to 10
document_rank    stringclasses   102 values
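For reference, a minimal sketch of loading rows with this schema via the Hugging Face datasets library; the JSON-lines file name is a placeholder, since the card does not list its data files:

from datasets import load_dataset

# Sketch only: "train.jsonl" is a placeholder path, not taken from this card.
ds = load_dataset("json", data_files="train.jsonl", split="train")
row = ds[0]
print(row["query"])             # natural-language description of the code
print(len(row["negatives"]))    # 4 to 101 hard-negative code snippets per row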
Draws interpolation plot for given interpolation polynomial and nodes.
import numpy
import matplotlib.pyplot as plt


def draw_interpolation_plot(start_x, end_x, interpolation_polynomial, nodes, freq=200,
                            additional_polynomial=None, additional_nodes=None):
    # TODO: calculate figure size dynamically
    plt.figure(figsize=(8, 6), dpi=80)
    x = numpy.linspace(start_x, end_x, freq)
    # TODO: eval should be changed to something more secure (like numexpr evaluate())...
    y = eval(str(interpolation_polynomial))
    plt.subplot(211)
    plt.plot(x, y, [node[0] for node in nodes], [node[1] for node in nodes], 'ro')
    plt.grid(True)
    if additional_polynomial:
        poly_values = eval(str(additional_polynomial))
        plt.subplot(212)
        plt.plot(x, poly_values,
                 [node[0] for node in additional_nodes], [node[1] for node in additional_nodes], 'ro')
        plt.grid(True)
    plt.show()
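The TODO above suggests numexpr as a safer replacement for eval(). A minimal sketch of evaluating a polynomial string with numexpr.evaluate; the expression string and variable name are illustrative assumptions, not taken from the snippet:

import numexpr
import numpy

# Sketch only: evaluate an arithmetic expression string over a numpy array
# without calling eval(); variables are passed in explicitly via local_dict.
x = numpy.linspace(0.0, 5.0, 200)                              # sample abscissa, as in the function above
y = numexpr.evaluate("2*x**3 - x + 1", local_dict={"x": x})    # placeholder polynomial string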
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawPolynomial(self, index, color, precision=200):\n graph = self.graphs[index]\n if len(graph) > 1:\n p = PolynomialInterpolation(graph, color)\n p.show(self.context, precision)", "def plot_interpolation(self):\r\n self.plot_all_logcalls(True)\r\n print_log('info', 'Interpolation was finished.')", "def plot_interpolation(ddG_x,ddG_y,ddG_x2,ddG_y2,ddG_x_interp,ddG_y_interp,close=True,save=True):\n\tplt.rc('text', usetex=True)\n\tplt.plot(ddG_x,ddG_y,'*',markersize=5,fillstyle='none')\n\tplt.plot(ddG_x2,ddG_y2,'o',markersize=5,fillstyle='none')\n\tplt.plot(ddG_x_interp, ddG_y_interp, '-')\n\tplt.legend(['input lambdas', 'cub/lin interpolated lambdas','cubic interpolation func'], loc='best')\n\tplt.xlabel(r'$\\lambda$') \n\tplt.ylabel(r\"$\\Delta \\Delta G / \\mathrm{kJ mol^{-1}}$\")\n\tif save:\n\t\tplt.savefig('ddG_interpolation.pdf')\n\tif close:\n\t\tplt.close()", "def make_plot(x,y):", "def plot_polynomial(self):\n plt.scatter(self.x_values, self.y_values)\n plt.title(f\"Graph of polynomial between {np.floor(min(self.x_values))} and {np.ceil(max(self.x_values))}\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.show()", "def _plot_interpolation(x, y, x_new, y_new, title=\"\"):\n f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\n axes = (ax1, ax2, ax3)\n coord = [\"X\", \"Y\", \"Z\"]\n\n for idx, ax in enumerate(axes):\n ax.set_title(title + \" (\" + coord[idx] + \" coordinate)\", fontsize=12)\n ax.set_ylabel(\"m\")\n ax.plot(x, y[:, idx], \"bo\", label=\"Original data\")\n ax.plot(x_new, y_new[:, idx], \"ro\", label=\"Interpolated data\")\n\n ax3.set_xlabel(\"Time\")\n ax1.legend(fontsize=8, loc=1)\n f.subplots_adjust(hspace=0.3)\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\n plt.show()", "def solve_i():\r\n x = np.array([ -2.1, -1.45, -1.3, -0.2, 0.1, 0.15, 0.8, 1.1, 1.5, 2.8, 3.8 ])\r\n y = np.array([0.012155, 0.122151, 0.184520, 0.960789, 0.990050, 0.977751,\r\n 0.527292, 0.298197, 0.105399, 3.936690E-4, 5.355348E-7])\r\n # find and plot both interpolations and the oiginal points\r\n plt.figure(1)\r\n cubic_interpol(x,y)\r\n lin_interpol(x,y)\r\n plt.plot(x, y, 'rx', ms = 10, label = 'Points')\r\n # plot settings\r\n plt.title('Cubic & Linear Interpolation Given Points')\r\n plt.xlabel('x',fontsize = 14)\r\n plt.ylabel('y',fontsize = 14)\r\n plt.legend()", "def graph_points():\n fig_name = 'lect2_num_solv'\n\n # given data\n x = np.array([0.0, 0.4, 0.6, 0.8])\n ra = np.array([0.01, 0.0080, 0.005, 0.002])\n design_eq = np.divide(2.0, ra)\n print(\"Generic example design equation points: {}\".format([\"{:0.1f}\".format(x) for x in design_eq]))\n\n # cubic spline\n x_new = np.linspace(0.0, 0.8, 101)\n # alternately, from interpolation\n y_interp = interpolate.interp1d(x, design_eq, kind='quadratic')\n make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_interp(x_new),\n x_label=r'conversion (X, unitless)', y_label=r'$\\displaystyle\\frac{F_{A0}}{-r_A} \\left(L\\right)$',\n x_lima=0.0, x_limb=0.8, y_lima=0.0, y_limb=1000,\n fig_width=4, color2='green',\n )", "def plot_all_logcalls(self, interpolation=False):\r\n\r\n # instead of ax.hold(False)\r\n self.figure.clear()\r\n\r\n # create an axis\r\n self.ax1 = self.figure.add_subplot(111)\r\n self.ax1.set_title('Logcalls pattern')\r\n\r\n any_node_logcalls = dict()\r\n sr_x = list()\r\n\r\n for e in self.called_func_route:\r\n for i in self.nodes:\r\n (i not in sr_x) and sr_x.append(i)\r\n if e not in any_node_logcalls:\r\n any_node_logcalls[e] = list()\r\n 
any_node_logcalls[e].append(self.nodes[i].get_func_count(e))\r\n\r\n progress_value = 0\r\n self.progressBar.setValue(0)\r\n for e in self.called_func_route:\r\n sr_fx = any_node_logcalls[e]\r\n self.ax1.plot(sr_x, sr_fx, linestyle='', marker='o', color='b')\r\n\r\n if interpolation:\r\n Lx = self.get_sub_two_interpolation_func(sr_x, sr_fx)\r\n # Enlargement the range to 10 folds for drawing the result of interpolation.\r\n self.tmp_x = [\r\n i / 10.0 for i in range(sr_x[0] * 10, sr_x[-1] * 10 + 1)\r\n ]\r\n self.tmp_y = [Lx(i) for i in self.tmp_x]\r\n self.ax1.plot(\r\n self.tmp_x, self.tmp_y, linestyle='--', marker='', label=e)\r\n else:\r\n self.ax1.plot(sr_x, sr_fx, linestyle='--', marker='o', label=e)\r\n\r\n progress_value += 1\r\n self.progressBar.setValue(\r\n float(progress_value) / len(self.called_func_route) * 100)\r\n\r\n self.ax1.legend(loc='best')\r\n\r\n # refresh canvas\r\n self.canvas.draw()", "def plot(self):\n\t\tself.plotOfIP().plot()", "def polynomialInterpolation2D(self,graph,T):\n x=[graph[i][0] for i in range(len(graph))]\n y=[graph[i][1] for i in range(len(graph))]\n return lambda t:(self.polynomialInterpolation(x)(t),self.polynomialInterpolation(y)(t))", "def cubic_interpol(X_P, Y_P):\r\n y_derivs = derivatives( X_P, Y_P ).flatten() # flatten as FB_sub returns 2d array\r\n \r\n for j in np.arange( X_P.shape[0] - 1 ): # for every x[i] and x[i+1] pair\r\n plot_points = np.linspace( X_P[j], X_P[j+1], 20) # points to plot in the interval\r\n params = [ X_P[j], X_P[j+1], Y_P[j], Y_P[j+1],\r\n y_derivs[j], y_derivs[j+1]]\r\n f_points = f(plot_points, params)\r\n plt.plot(plot_points, f_points, 'b-', ms = .5, label = 'Cubic'if j==0 else \"\") # only label one plot\r", "def plot(self, X, sids, nids):\n X = tocontig(X) # ensure it's contig\n gw = self.glWidget\n gw.points = X\n gw.npoints = len(X)\n gw.sids = sids\n gw.nids = nids\n gw.color() # set colors\n gw.updateGL()", "def plot(self, X, sids, nids):\n X = tocontig(X) # ensure it's contig\n gw = self.glWidget\n gw.points = X\n gw.npoints = len(X)\n gw.sids = sids\n gw.nids = nids\n gw.color() # set colors\n gw.updateGL()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot_graph(self) -> None:", "def update_interpolated_and_dots(function_selector, discretization_Slider,\n interpolated_values):\n # Each base point (left or right foot and middle node) is shared by three\n # Finite Elements aside from the two the most 
right and the most left\n number_points = discretization_slider.value + 2\n\n x = np.linspace(LEFT_X, RIGHT_X, number_points)\n\n y = functions[function_selector.active](x)\n\n interpolated_values.data = {\n \"x\": x,\n \"y\": y\n }", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', 
ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def plot_nodes(self, node_list):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 0.0\n points.color.g = 1.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n for node in node_list:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.01\n points.points.append(p1)\n \n self.pub_nodes.publish(points)", "def test_plot1(plot=1, version='scalar'):\n Lx = 10\n Ly = 10\n c = 1.0\n\n def I2(x, y):\n return exp(-(x-Lx/2.0)**2/2.0 -(y-Ly/2.0)**2/2.0)\n def f(x, y, t):\n return 0.0\n def bc(x, y, t):\n return 0.0\n\n I2 = StringFunction('exp(-(x-Lx/2.0)**2/2.0 -(y-Ly/2.0)**2/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('0.0', independent_variables=('x', 'y', 't'),\n globals=globals())\n if plot:\n g = Gnuplot.Gnuplot(persist=1)\n g('set parametric')\n g('set data style lines')\n g('set hidden')\n g('set contour base')\n g('set zrange [-0.7:0.7]') # nice plot...\n \n def action(u, xv, yv, t):\n #print 'action, t=',t,'\\nu=',u, '\\nx=',x, '\\ny=', y\n if plot:\n data = Gnuplot.GridData(u, xv[:,0], yv[0,:], binary=0)\n g.splot(data)\n g('set title \"t=%g\"' % t)\n if plot == 2:\n g.hardcopy(filename='tmp_%020f.ps' % t, enhanced=1, mode='eps',\n color=0, fontname='Times-Roman', fontsize=14)\n time.sleep(1)\n time.sleep(0.2) # pause between frames\n\n implementation = {'ic': version, 'inner': version, 'bc': version}\n nx = 40; ny = 40; tstop = 20 # tstop = 700\n print 'test_plot1:', f, bc, I2\n dt, t_ic, t_inner, t_bc = \\\n solver(I2, f, c, bc, Lx, Ly, nx, ny, 0, tstop,\n user_action=action, implementation=implementation)\n print 'time ic: %s, time scheme: %s, time bc: %s' % (t_ic, t_inner, t_bc)\n time.sleep(3)", "def drawPlotParts(x, y, xlabel, ylabel, nparts):\n\tle = len(y)\n\tif x is None:\n\t\tx = list(range(le))\n\tstep = int(le / nparts)\n\tfor i in range(nparts):\n\t\tbeg = i * step\n\t\tend = le if i == nparts - 1 else beg + step\n\t\tdrawPlot(x[beg:end], y[beg:end], xlabel, ylabel)", "def visualize(nodes, weights, new_corners):\n import matplotlib.pyplot as plt\n\n if nodes.shape[1] == 1:\n plt.scatter(nodes[:], np.zeros(nodes.shape), s=weights * 40)\n plt.plot(\n [\n new_corners[0, 0],\n new_corners[0, 0],\n new_corners[1, 0],\n new_corners[1, 0],\n new_corners[0, 0],\n ],\n [0.5, 
-0.5, -0.5, 0.5, 0.5],\n )\n plt.show()\n elif nodes.shape[1] == 2:\n plt.scatter(nodes[:, 0], nodes[:, 1], s=weights * 40)\n plt.plot(\n [\n new_corners[0, 0],\n new_corners[1, 0],\n new_corners[2, 0],\n new_corners[0, 0],\n ],\n [\n new_corners[0, 1],\n new_corners[1, 1],\n new_corners[2, 1],\n new_corners[0, 1],\n ],\n )\n plt.show()\n elif nodes.shape[1] == 3:\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n ax.scatter(nodes[:, 0], nodes[:, 1], nodes[:, 2], s=weights * 40)\n\n p = new_corners\n for i in range(4):\n verts = [[p[(i) % 4, :], p[(i + 1) % 4, :], p[(i + 2) % 4, :]]]\n srf = Poly3DCollection(verts, alpha=0.25, facecolor=\"#800000\")\n plt.gca().add_collection3d(srf)\n plt.show()", "def polynomialInterpolation(self,s):\n #print(s)\n #s[i]=xi,s[j]=xj\n return Polynomial.createFromInterpolation(s,range(len(s)))\n #return Polynomial(s,T)", "def lin_interpol(x_p, y_p):\r\n f = np.zeros([ x_p.shape[0] - 1 , 4 ]) # Coefficents and interval array\r\n \r\n for i in range( x_p.shape[0] - 1 ): # for every x[i], x[i+1] pair\r\n \r\n x_coeff = (y_p[i+1] - y_p[i]) / (x_p[i+1] - x_p[i])\r\n const = (x_p[i+1]*y_p[i] - x_p[i]*y_p[i+1] ) / (x_p[i+1] - x_p[i])\r\n \r\n # save the x coefficent, constant and the interval for this line\r\n f[i,:] = x_coeff, const, x_p[i], x_p[i+1]\r\n \r\n for a, b, start, end in f: # for every line fitted\r\n line_x = np.linspace( start, end, 3) # points to plot in x_range\r\n line_y = line_x * a + b # find the fitted line value at these points\r\n plt.plot(line_x,line_y,'k--', lw = 1, label = 'Linear' if a==f[0][0] else \"\") # only label one plot\r", "def plot_planned_trajectory(ax, xs, ys, headings, steers, physical_params, interval = 20):\n ax.plot(xs, ys, color=\"r\")\n for i in range(len(steers)):\n # ellipse = Ellipse(xy = (x, y), width = x_length, height = y_length, angle = np.rad2deg(heading), alpha = 0.4, ec = \"k\", fc = fc)\n # ax.add_patch(ellipse)\n if i % interval == 0:\n plot_vehicle(ax, xs[i], ys[i], headings[i], steers[i], 0.7, 0.7, physical_params.wheel_length, physical_params.wheel_width)\n ax.set_xlabel(\"X Position\")\n ax.set_ylabel(\"Y Position\")\n ax.axis('equal')", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n **self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n 
y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )", "def plot():\n pass", "def PlotMeshNumbering(self, figure=None, show_plot=True):\n\n self.__do_essential_memebers_exist__()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n\n if self.element_type == \"tri\":\n\n if figure is None:\n figure = plt.figure()\n plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])\n plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)\n\n for i in range(0,self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"quad\":\n\n if figure is None:\n figure = plt.figure()\n point_radius = 3.\n\n C = self.InferPolynomialDegree() - 1\n\n edge_elements = self.GetElementsEdgeNumberingQuad()\n reference_edges = NodeArrangementQuad(C)[0]\n reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)\n reference_edges = np.delete(reference_edges,1,1)\n\n self.GetEdgesQuad()\n x_edges = np.zeros((C+2,self.all_edges.shape[0]))\n y_edges = np.zeros((C+2,self.all_edges.shape[0]))\n\n BasesOneD = np.eye(2,2)\n for iedge in range(self.all_edges.shape[0]):\n ielem = edge_elements[iedge,0]\n edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]\n x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T\n\n\n plt.plot(x_edges,y_edges,'-k')\n\n for i in range(self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n\n import matplotlib as mpl\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n if figure is None:\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))\n view = mlab.view()\n figure.scene.disable_render = True\n\n color = mpl.colors.hex2color('#F88379')\n\n linewidth = 3.\n # trimesh_h = mlab.triangular_mesh(self.points[:,0],\n # self.points[:,1], self.points[:,2], self.faces[:,:3],\n # line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),\n # representation='wireframe') # representation='surface'\n\n # # CHANGE LIGHTING OPTION\n # trimesh_h.actor.property.interpolation = 'phong'\n # trimesh_h.actor.property.specular = 0.1\n # trimesh_h.actor.property.specular_power = 5\n\n # PLOTTING EDGES\n from Florence.PostProcessing import PostProcess\n tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,\n plot_points=True, plot_edges=True, plot_surfaces=False)\n\n x_edges = tmesh.x_edges\n y_edges = tmesh.y_edges\n z_edges = tmesh.z_edges\n connections = tmesh.connections\n\n 
src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())\n src.mlab_source.dataset.lines = connections\n h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)\n # AVOID WARNINGS\n # lines = mlab.pipeline.stripper(src)\n # h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)\n\n # ELEMENT NUMBERING\n # for i in range(0,self.elements.shape[0]):\n # coord = self.points[self.elements[i,:],:]\n # x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n # y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n # z_avg = np.sum(coord[:,2])/self.elements.shape[1]\n\n # # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)\n # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)\n\n # POINT NUMBERING\n for i in range(self.elements.shape[0]):\n for j in range(self.elements.shape[1]):\n text_obj = mlab.text3d(self.points[self.elements[i,j],0],\n self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),\n color=(0,0,0.),scale=0.05)\n\n\n figure.scene.disable_render = False\n\n if show_plot:\n # mlab.view(*view)\n mlab.show()", "def redraw_whole_plot(self):\n pcent_rand = self.rand\n pcent_decimal = pcent_rand/100\n self.x = np.array([\n n*np.random.uniform(low=1-pcent_decimal, high=1+pcent_decimal) \n for n in np.linspace(3, 9, self.num_points)\n ])\n self.y = np.array([\n n*np.random.uniform(low=1-pcent_decimal, high=1+pcent_decimal)\n for n in np.linspace(3, 9, self.num_points)\n ])\n self.redraw_slope()", "def plot(self,plot='smoothedOnly',includeBP=True):\n\n\t\tif plot=='all':\n\t\t\tfor j in range(0,20):\n\t\t\t\t\tp1=self.plotOfSingleSensor(j,'all').plot()\n \n\t\telse:\n\t\t\tfor j in range(0,8):\n\t\t\t\tif j==0:\n\t\t\t\t\tp1=self.plotOfSingleSensor(j,plot) \n\t\t\t\t\tp3=self.plotOfSingleSensor(12+j,plot) \n\t\t\t\t\tif j<4:\n\t\t\t\t\t\tp2=self.plotOfSingleSensor(8+j,plot) \n\t\t\t\telse:\n\t\t\t\t\tp1.mergePlots(self.plotOfSingleSensor(j,plot))\n\t\t\t\t\tp3.mergePlots(self.plotOfSingleSensor(12+j,plot))\n\t\t\t\t\tif j<4:\n\t\t\t\t\t\tp2.mergePlots(self.plotOfSingleSensor(8+j,plot)) \t\n\t\t\tp1.subtitle='Section 1 SOL Sensors'\t\n\t\t\tp2.subtitle='Section 4 SOL Sensors'\t\n\t\t\tp3.subtitle='Section 8 SOL Sensors'\t\t\t\n\t\t\treturn _plot.subPlot([p1,p2,p3],plot=True)", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "def test_interpolated():\n # Set random seed for reproducibility\n manualSeed = main.manualSeed\n print(\"Fixed Seed: \", manualSeed)\n random.seed(manualSeed)\n 
torch.manual_seed(manualSeed)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n netG = torch.load(main.netG_path).to(device)\n for i in range(20):\n p1_continuous = torch.randn(1, main.z_ncontinuous, 1, 1, device=device)\n p1_discrete = torch.randint(0, 2, (1, main.z_ndiscrete, 1, 1), device=device)\n p1 = torch.cat((p1_continuous, p1_discrete), dim=1)\n\n p2_continuous = torch.randn(1, main.z_ncontinuous, 1, 1, device=device)\n p2_discrete = torch.randint(0, 2, (1, main.z_ndiscrete, 1, 1), device=device)\n p2 = torch.cat((p2_continuous, p2_discrete), dim=1)\n plot_generated(interpolate_two_points(p1, p2), netG)", "def draw_stationary(centers, pi, plot_pi_orig=True, output_file=None):\n\n interp_density_plot(centers, pi, plot_pi_orig)\n plt.title('Stationary Distribution')\n\n # export\n if output_file:\n plt.savefig(output_file)\n print('Figure saved as ', output_file)\n else:\n plt.show()", "def plot_outputgrid(self, scalefactor=1, **kwargs):\n\n if not (type(scalefactor) == 'int'):\n scalefactor = round(scalefactor)\n\n xx = np.arange(self.xori, self.xend, scalefactor * self.dx)\n yy = np.arange(self.yori, self.yend, scalefactor * self.dy)\n plt.hlines(yy, self.xori, self.xend, linewidth=0.2, **kwargs)\n plt.vlines(xx, self.yori, self.yend, linewidth=0.2, **kwargs)\n\n logger.debug('Adding output grid to plot')", "def draw_edges():\n\n def bezier(p0, p1, p2, **kwargs):\n x0, y0 = p0\n x1, y1 = p1\n x2, y2 = p2\n xb = [\n (1 - t) ** 2 * x0 + 2 * t * (1 - t) * x1 + t ** 2 * x2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n yb = [\n (1 - t) ** 2 * y0 + 2 * t * (1 - t) * y1 + t ** 2 * y2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n ax.plot(xb, yb, **kwargs)\n\n for edge in self._edges:\n\n u, v = edge\n\n x0, y0, a0 = (\n node_properties[\"node_x\"][u],\n node_properties[\"node_y\"][u],\n node_properties[\"theta\"][u],\n )\n x2, y2, a2 = (\n node_properties[\"node_x\"][v],\n node_properties[\"node_y\"][v],\n node_properties[\"theta\"][v],\n )\n\n angle = a0 + (a2 - a0) / 2\n\n # if angle > np.pi:\n # angle_corr = angle - np.pi\n # else:\n # angle_corr = angle\n\n distance = np.abs(a2 - a0)\n if distance > np.pi:\n distance = distance - np.pi\n distance = (1.0 - 1.0 * distance / np.pi) * R / 2.5\n x1 = distance * np.cos(angle)\n y1 = distance * np.sin(angle)\n x1 = 0\n y1 = 0\n\n ## dibuja los arcos\n bezier(\n [x0, y0], [x1, y1], [x2, y2], **self._edges[edge],\n )", "def test_render_xy_plot():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n render_xy_plot(gdpinfo, [], \"isp_gdp_xy_none.svg\")\n render_xy_plot(gdpinfo, [\"China\"], \"isp_gdp_xy_china.svg\")\n render_xy_plot(gdpinfo, [\"United Kingdom\", \"United States\"],\n \"isp_gdp_xy_uk+usa.svg\")\n render_xy_plot(gdpinfo, [\"India\", \"China\", \"United Kingdom\", \"United States\", \"Aruba\", \"Andorra\", \"Angola\", \"Afghanistan\", \"Albania\"], \"isp_gdp_xy_countries.svg\")", "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = 
yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def draw_nodes(self):\n pass", "def graph_smooth_from_pts():\n fig_name = 'lect2_isom'\n\n # given data\n x = np.array([0.0, 0.2, 0.4, 0.6, 0.65])\n ra = np.array([39.0, 53.0, 59.0, 38.0, 25.0])\n design_eq = np.divide(50.0, ra)\n print(\"Isom example design equation points: {}\".format(design_eq))\n\n # cubic spline\n tck = interpolate.splrep(x, design_eq, s=0)\n x_new = np.linspace(0.0, 0.7, 101)\n y_new = interpolate.splev(x_new, tck, der=0)\n # alternately, from interpolation\n cubic_interp = interpolate.interp1d(x, design_eq, kind='quadratic', fill_value=\"extrapolate\")\n make_fig(fig_name, x, design_eq, ls1='o', x2_array=x_new, y2_array=y_new,\n x3_array=x_new, y3_array=cubic_interp(x_new),\n y1_label=\"data\", y2_label=\"quadratic\", y3_label=\"cubic\",\n x_label=r'conversion (X, unitless)', y_label=r'$\\displaystyle\\frac{F_{A0}}{-r_A} \\left(m^3\\right)$',\n x_lima=0.0, x_limb=0.7, y_lima=0.0, y_limb=2.5,\n )", "def plotSolitonIntersection():\n \n cells = np.loadtxt('cells.txt') #file with several cell [x0 y0 x1 y1 x2 y2 x3 y3], each cell in seperate row (at least 2 needed)\n cells = np.hstack([cells,cells[:,:2]])\n for cell in cells:\n plt.plot(cell[::2],cell[1::2])\n\n np.savetxt('X.txt',sorted(cells.flatten()[::2]))\n subprocess.call('solitonEta 0.3 15 0 X.txt',shell=True)\n soliton = np.loadtxt('eta.txt')\n plt.plot(soliton[:,0],soliton[:,1])\n\n #ipdb.set_trace()\n plt.savefig('boxes.png')\n plt.show()", "def visualize(I, f, c, bc, Lx, Ly, nx, ny, dt, tstop,\n zmin, zmax, plot=1, version='scalar',\n implementation=\\\n {'ic': 'scalar', 'inner': 'scalar', 'bc': 'scalar'}):\n class Visualizer:\n \"\"\"Action function used by solver.\"\"\"\n def __init__(self, plot=0):\n self.plot = plot\n if self.plot:\n self.g = Gnuplot.Gnuplot(persist=1)\n self.g('set parametric')\n self.g('set data style lines')\n self.g('set hidden')\n self.g('set contour base')\n self.g('set zrange [%g:%g]' % (zmin,zmax))\n\n def __call__(self, u, xv, yv, t):\n if self.plot:\n data = Gnuplot.GridData(u, xv[:,0], yv[0,:], binary=0)\n self.g.splot(data)\n self.g('set title \"t=%g\"' % t)\n if self.plot == 2:\n self.g.hardcopy(filename='tmp_%020f.ps' % t,\n enhanced=1, mode='eps', fontsize=14,\n color=0, fontname='Times-Roman')\n time.sleep(0.8) # pause to finish 
plot\n\n viz = Visualizer(plot)\n dt, cpu_ic, cpu_inner, cpu_bc = \\\n solver(I, f, c, bc, Lx, Ly, nx, ny, dt, tstop,\n user_action=viz, implementation=implementation)\n time.sleep(3)", "def plot_inv_RV_lit(outpath, fit_slopes, fit_intercepts, fit_stds):\n waves = np.arange(0.8, 4.01, 0.001)\n fig, ax = plt.subplots(figsize=(10, 9))\n\n for i, RV in enumerate([2.5, 3.1, 5.5]):\n # plot the extinction curve from this work\n offset = 0.1 * i\n slopes = interpolate.splev(waves, fit_slopes)\n alav = fit_intercepts(waves) + slopes * (1 / RV - 1 / 3.1)\n (line,) = ax.plot(waves, alav + offset, lw=1.5, label=r\"$R(V) = $\" + str(RV))\n stddev = interpolate.splev(waves, fit_stds)\n color = line.get_color()\n ax.fill_between(\n waves,\n alav + offset - stddev,\n alav + offset + stddev,\n color=color,\n alpha=0.2,\n edgecolor=None,\n )\n\n # plot the literature curves\n styles = [\"--\", \":\"]\n for i, cmodel in enumerate([CCM89, F19]):\n ext_model = cmodel(Rv=RV)\n (indxs,) = np.where(\n np.logical_and(\n 1 / waves >= ext_model.x_range[0], 1 / waves <= ext_model.x_range[1]\n )\n )\n yvals = ext_model(waves[indxs] * u.micron)\n ax.plot(\n waves[indxs],\n yvals + offset,\n lw=1.5,\n color=color,\n ls=styles[i],\n alpha=0.8,\n )\n\n # add text\n ax.text(3.45, 0.03, r\"$R(V) = 2.5$\", fontsize=0.8 * fs, color=\"tab:blue\")\n ax.text(3.45, 0.15, r\"$R(V) = 3.1$\", fontsize=0.8 * fs, color=\"tab:orange\")\n ax.text(3.45, 0.305, r\"$R(V) = 5.5$\", fontsize=0.8 * fs, color=\"tab:green\")\n\n # finalize and save the plot\n ax.set_xlabel(r\"$\\lambda\\ [\\mu m$]\", fontsize=1.2 * fs)\n ax.set_ylabel(r\"$A(\\lambda)/A(V)$ + offset\", fontsize=1.2 * fs)\n ax.set_xlim(0.75, 4.05)\n ax.set_ylim(-0.03, 0.98)\n handles = [\n Line2D([0], [0], color=\"k\", lw=1.5),\n Line2D([0], [0], color=\"k\", lw=1.5, ls=\"--\"),\n Line2D([0], [0], color=\"k\", lw=1.5, ls=\":\"),\n ]\n labels = [\n \"this work\",\n \"Cardelli et al. (1989)\",\n \"Fitzpatrick et al. 
(2019)\",\n ]\n plt.legend(handles, labels, fontsize=fs)\n plt.savefig(outpath + \"inv_RV_lit.pdf\", bbox_inches=\"tight\")\n\n # also save the plot in log scale\n plt.ylim(0.01, 1)\n plt.yscale(\"log\")\n plt.tight_layout()\n plt.savefig(outpath + \"inv_RV_lit_log.pdf\")", "def plot_results(delaytype):\n \n if delaytype == 'Polynomial':\n # results for ind_obs=graph.indlinks.keys()\n tolls_collected1 = [[164.76921455176952, 690.919364787199, 1003.9715271443032], [164.76920846294715, 690.9196375266908, 1003.971503332828]]\n so_costs1 = [[1952.813985915695, 2991.422528084584, 4716.77478205995], [1952.813764068618, 2991.4225756924197, 4716.774942888717]]\n costs1 = [[1961.4088544768533, 3035.621746689017, 4751.282035585294], [1961.4088092322784, 3035.619283467948, 4751.282202058287]]\n ue_costs1 = [[1993.9477013089333, 3087.4591784440836, 4889.555908205202], [1993.9532544651581, 3087.4592286945117, 4889.562102247506]]\n \n # results for ind_obs=[(36,37,1), (13,14,1), (17,8,1), (24,17,1), (28,22,1), (14,13,1), (17,24,1), (24,40,1), (14,21,1), (16,23,1)]\n tolls_collected2 = [[165.05685117271955, 354.1634370314711, 1015.9141858070569], [165.0567022829941, 354.1917223859498, 1015.9119098321756]]\n so_costs2 = [[1952.4102665477528, 2990.2968764530697, 4714.092736177128], [1952.813764068618, 2991.4225756924197, 4716.774942888717]]\n costs2 = [[1961.0182151023969, 3019.353015234613, 4750.051821659878], [1961.4090940730164, 3020.4668352547183, 4752.675436210774]]\n ue_costs2 = [[1993.5324364880598, 3086.401670312004, 4886.656461616093], [1993.9532544651581, 3087.4592286945117, 4889.562102247506]]\n \n # results for ind_obs=[(17,24,1), (24,40,1), (14,21,1), (16,23,1)]\n tolls_collected3 = [[122.37203827953705, 369.5962596851687, 993.1260548913893], [124.00144138458633, 375.65796385498777, 1018.6147484597942]]\n so_costs3 = [[1819.720988976306, 2777.3939885366685, 4677.595258102356], [1952.813764068618, 2991.4225756924197, 4716.774942888717]]\n costs3 = [[1834.6351015726038, 2802.3016403566235, 4714.102145227426], [1965.8049083549738, 3020.126090534662, 4750.071391283308]]\n ue_costs3 = [[1872.3492340077084, 2897.9834368843217, 4886.769726452165], [1993.9532544651581, 3087.4592286945117, 4889.562102247506]]\n \n # results for ind_obs=[(10,9,1), (19,18,1), (4,5,1), (29,21,1)]\n tolls_collected4 = [[602.0313412901077, 596.7300601881633, 833.3678073460065], [609.094482012133, 542.8579545671112, 843.1817520139325]]\n so_costs4 = [[13252.728980891832, 20178.927282098706, 28549.614451166774], [1952.813764068618, 2991.4225756924197, 4716.774942888717]]\n costs4 = [[13254.342842900576, 20239.30287102026, 28549.614731411177], [1992.3969007442322, 3130.0522527298062, 4805.6728306302175]]\n ue_costs4 = [[13259.433117552415, 20185.90937054816, 28556.779443740656], [1993.9532544651581, 3087.4592286945117, 4889.562102247506]]\n \n \n if delaytype == 'Hyperbolic':\n # results for ind_obs=graph.indlinks.keys() \n tolls_collected1 = [[161.15742640370462, 416.80264009965265, 948.124688583883], [161.4013529462645, 417.25218843773354, 950.1674351402539]]\n so_costs1 = [[2519.5329969919685, 3679.4257321044365, 5325.940489651684], [2544.920874812245, 3718.292867749051, 5383.190501656683]]\n costs1 = [[2525.7590340322095, 3679.4261220632625, 5356.8071818038225], [2551.0113714206195, 3718.3028736461647, 5412.310857035818]]\n ue_costs1 = [[2548.8303153671195, 3736.67165067958, 5444.499035875823], [2574.155371287541, 3775.672784740404, 5503.564654849874]]\n \n # results for ind_obs=[(36,37,1), (13,14,1), (17,8,1), (24,17,1), 
(28,22,1), (14,13,1), (17,24,1), (24,40,1), (14,21,1), (16,23,1)]\n tolls_collected2 = [[355.56391097372205, 701.3544774585599, 806.6640717434449], [374.34816990991186, 736.3763770517197, 842.0768966826593]]\n so_costs2 = [[2174.1387842065237, 3306.974167934956, 4969.5560792612605], [2544.920874812245, 3718.292867749051, 5383.190501656683]]\n costs2 = [[2201.273656230071, 3338.7536674213156, 4988.732654672769], [2564.0153714222406, 3746.115605602468, 5398.046518880896]]\n ue_costs2 = [[2209.622435372754, 3380.7186533697286, 5104.755602389524], [2574.155371287541, 3775.672784740404, 5503.564654849874]]\n \n # results for ind_obs=[(17,24,1), (24,40,1), (14,21,1), (16,23,1)]\n tolls_collected3 = [[339.37838703976513, 737.9887571945254, 930.0531221139565], [353.28118085588176, 763.3708450755438, 955.644104623869]]\n so_costs3 = [[2520.1983680076073, 3833.234908857907, 5645.320052639795], [2544.920874812245, 3718.292867749051, 5383.190501656683]]\n costs3 = [[2536.138622203524, 3862.614483834193, 5668.430527845267], [2562.897499637454, 3750.9322754235955, 5406.402328441233]]\n ue_costs3 = [[2556.0266644367803, 3903.9758481975805, 5741.545038028959], [2574.155371287541, 3775.672784740404, 5503.564654849874]]\n \n # results for ind_obs=[(10,9,1), (19,18,1), (4,5,1), (29,21,1)]\n tolls_collected4 = [[443.85669649193784, 837.5428841560781, 1380.117295219297], [479.57423528502, 931.2309202734045, 1499.6972922042573]]\n so_costs4 = [[2158.995768390798, 3843.4202195503713, 7595.092411648515], [2544.920874812245, 3718.292867749051, 5383.190501656683]]\n costs4 = [[2192.900868595389, 3888.0172745668465, 7626.17585256234], [2575.5748442305207, 3755.844017718783, 5450.304046277645]]\n ue_costs4 = [[2231.1674179937672, 3999.772800567613, 7867.415688946321], [2574.155371287541, 3775.672784740404, 5503.564654849874]]\n \n # effects of toll pricing\n opacity = 0.4\n width = 0.2\n index = np.arange(4)\n plt.xticks(index, ('all', '10', '4 good', '4 bad') )\n colors = ['y', 'r', 'b']\n demands = ['94', '118', '142']\n for i in [0,1,2]:\n so = so_costs1[1][i]\n ue = ue_costs1[1][i]\n delta = ue-so\n plt.bar(index-width/2+(i-1)*width,\n [100.0*(costs1[1][i]-so)/delta, \n 100.0*(costs2[1][i]-so)/delta, \n 100.0*(costs3[1][i]-so)/delta, \n 100.0*(costs4[1][i]-so)/delta],\n width, alpha=opacity, color=colors[i], label='demand='+demands[i])\n plt.xlabel('Sets of observed links')\n plt.ylabel('Relative loss (%)')\n plt.title('Effects of toll pricing with '+delaytype+' delay function')\n plt.legend(loc=0)\n plt.show()\n \n # error in predicted UE\n for i in [0,1,2]:\n ue = ue_costs1[1][i]\n plt.bar(index-width/2+(i-1)*width,\n [100.0*abs(ue_costs1[0][i]-ue)/ue,\n 100.0*abs(ue_costs2[0][i]-ue)/ue,\n 100.0*abs(ue_costs3[0][i]-ue)/ue,\n 100.0*abs(ue_costs4[0][i]-ue)/ue],\n width, alpha=opacity, color=colors[i], label='demand='+demands[i])\n plt.xlabel('Sets of observed links')\n plt.ylabel('Relative error (%)')\n plt.title('Error in predicted UE with '+delaytype+' delay function')\n plt.legend(loc=0)\n plt.show()\n \n # error in predicted SO\n for i in [0,1,2]:\n so = so_costs1[1][i]\n plt.bar(index-width/2+(i-1)*width,\n [100.0*abs(so_costs1[0][i]-so)/so,\n 100.0*abs(so_costs2[0][i]-so)/so,\n 100.0*abs(so_costs3[0][i]-so)/so,\n 100.0*abs(so_costs4[0][i]-so)/so],\n width, alpha=opacity, color=colors[i], label='demand='+demands[i])\n plt.xlabel('Sets of observed links')\n plt.ylabel('Relative error (%)')\n plt.title('Error in predicted SO with '+delaytype+' delay function')\n plt.legend(loc=0)\n plt.show()\n \n # error in 
predicted tolled ue\n for i in [0,1,2]:\n plt.bar(index-width/2+(i-1)*width,\n [100.0*abs(costs1[0][i]-costs1[1][i])/costs1[1][i],\n 100.0*abs(costs2[0][i]-costs2[1][i])/costs2[1][i],\n 100.0*abs(costs3[0][i]-costs3[1][i])/costs3[1][i],\n 100.0*abs(costs4[0][i]-costs4[1][i])/costs4[1][i]],\n width, alpha=opacity, color=colors[i], label='demand='+demands[i])\n plt.xlabel('Sets of observed links')\n plt.ylabel('Relative error (%)')\n plt.title('Error in predicted tooled UE with '+delaytype+' delay function')\n plt.legend(loc=0)\n plt.show()", "def plot_iter(V, Pi, params):\n n_rows = params['n_rows']\n n_cols = params['n_cols'] \n occ_grid = params['occ_grid']\n R = params['R']\n\n goal = params['goal']\n sink = params['sink']\n\n actions = ['left','right','up','down']\n\n fig1 = plt.figure(1, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if occ_grid[row, col] == 1:\n plt.text(col, n_rows - 1 - row, '0.0', color='k', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n else:\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(V[row, col]), \n color='b', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n\n fig2 = plt.figure(2, clear=True)\n for row in range(n_rows):\n for col in range(n_cols):\n if not Pi[row, col] == -1:\n plt.text(col, n_rows - 1 - row, actions[Pi[row, col]], \n color='k', ha='center', va='center')\n elif np.all([row, col]==goal):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='g', ha='center', va='center')\n elif np.any(np.logical_and(row==sink[:, 0], col==sink[:, 1])):\n plt.text(col, n_rows - 1 - row, \"{:.3f}\".format(R[row, col]), \n color='r', ha='center', va='center')\n plt.axis([-1, n_cols, -1, n_rows])\n plt.axis('off')\n\n fig1.canvas.draw()\n fig1.canvas.flush_events()\n fig2.canvas.draw()\n fig2.canvas.flush_events()", "def prob6(n):\n data = np.load('airdata.npy')\n fx = lambda a, b, n: .5*(a+b + (b-a) * np.cos(np.arange(n+1) * np.pi/n))\n a, b = 0, 366 - 1/24\n domain = np.linspace(0, b, 8784)\n pts = fx(a, b, n)\n temp = np.abs(pts - domain.reshape(8784, 1))\n temp2 = np.argmin(temp, axis=0)\n poly = Barycentric(domain[temp2], data[temp2])\n\n plt.ion()\n plt.subplot(121)\n plt.plot(domain, data)\n plt.title(\"Data\")\n plt.subplot(122)\n plt.plot(domain, poly(domain))\n plt.title(\"Interpolation\")\n plt.show()", "def spore_integrals():\n\n path_tops = os.path.join(path_to_here, '../data/landscape_visualizations/DMSO/{}/{}_hours')\n idx_epochs = [25, 30]\n repeat_names = ['original', 'repeat_a', 'repeat_b']\n pList_data = os.path.join(path_to_here, '../data/embeddings/lims13/lims13_DMSO.pickle')\n pList_data = _load_and_resize_list(pList_data)\n\n\n def _plot_from_idx_repeat(ax, repeat_name, idx_epoch, color, linewidth = 0.6, linestyle = '-'):\n if os.path.isfile(path_tops.format(repeat_name, idx_epoch) + '/p_list_0.pickle'):\n p_list_NN = _load_and_resize_list(path_tops.format(repeat_name, idx_epoch) + '/p_list_0.pickle')\n NN_spore_integrals = [np.sum(i[427:442, 182:196])*(26**2)/(1000**2) for i in p_list_NN]\n ax.plot([1, 2, 3, 4, 5, 6, 7, 8], NN_spore_integrals[1:], c = color, linewidth = linewidth)\n\n\n data_spore_integrals = [np.sum(i[427:442, 182:196])*(26**2)/(1000**2) for i in 
pList_data]\n\n fig = plt.figure(figsize = (2.3, 1.5))\n for i, idx_epoch in enumerate(idx_epochs):\n ax = fig.add_subplot(len(idx_epochs), 1, i+1)\n ax.plot([1, 2, 3, 4, 5, 6, 7, 8], data_spore_integrals[1:], linewidth = 1, c = 'red')\n if idx_epoch is not idx_epochs[-1]:\n ax.set_xticks([])\n for repeat_name, color in zip(repeat_names, ['blue', 'green', 'orange']):\n _plot_from_idx_repeat(ax = ax, repeat_name = repeat_name, idx_epoch = idx_epoch, color = color)\n if idx_epoch == idx_epochs[-1]:\n _plot_from_idx_repeat(ax = ax, repeat_name = 'original', idx_epoch = 60, color = 'black', linewidth = 1, linestyle = '--')\n\n ax.set_ylabel(r'$\\approx \\int_{spore} \\hat{p}(\\mathbf{x},t)$', fontsize = 6, labelpad = 1)\n ax.set_xlabel('Snapshot', fontsize = 6, labelpad = 1)\n for ax in fig.axes:\n ax.tick_params(axis = 'both', labelsize = 6)\n\n plt.subplots_adjust(hspace = 0)\n plt.tight_layout()\n plt.savefig(path_to_here+'/../outputs/'+'spore_integrals.png', dpi = 1200)", "def plot_analytic_curve(ax, n_HArr, tempArr, pltkwarg, write_text, tx, txloc=[], fs=8):\n\tax.plot(n_HArr, tempArr, **pltkwarg)\n\n\tif write_text == \"temp_coord\":\n\t\tdiff = N.abs(tempArr - txloc)\n\t\tii = N.where(diff == N.min(diff))[0]\n\t\tax.text(n_HArr[ii], tempArr[ii], r\"$%s$\" % tx, fontsize=fs)\n\telif write_text == \"den_coord\":\n\t\tdiff = N.abs(n_HArr - txloc)\n\t\tii = N.where(diff == N.min(diff))[0]\n\t\tax.text(n_HArr[ii], tempArr[ii], r\"$%s$\" % tx, fontsize=fs)\n\telif write_text == \"axis_coord\":\n\t\t\tax.text(txloc[0], txloc[1], r\"$%s$\" % tx, fontsize=fs, horizontalalignment=\"center\", verticalalignment=\"center\", transform=ax.transAxes)\n\telse:\n\t\tpass", "def testing_fixed_point_newton_interp(fixed_point_functions, n, m=400):\n\n # Function to convert to root finding problem given g(x). 
'g(x*) = x*' -> 'f(x*) = 0'\n Ffun = lambda Gfun: lambda x: Gfun(x) -x\n\n import matplotlib.pylab as plt\n\n # setting up figure\n num_plots = len(fixed_point_functions)\n\n fig, axs = plt.subplots(1, num_plots, figsize=(15, 6), facecolor='w', edgecolor='k')\n fig.subplots_adjust(hspace = .5, wspace=.001)\n axs = axs.ravel()\n\n i = 0 # 'graph number'\n for Gfun_name, Gfun in fixed_point_functions.items():\n\n # <computation block>\n\n # convert to root finding problem\n f = Ffun(Gfun)\n\n # compute x and y data points\n x = np.linspace(-1,1,n)\n y = f(x)\n\n # compute coefficients of interpolating polynomial\n c = coeffients(x,y)\n\n # evaluate actual function points for graph\n ax = np.linspace(-1,1,m)\n ay = f(ax)\n\n # calculate y values using the interpolating polynomials coefficients\n y_hats = []\n for xi in ax:\n y_hati = np.polyval(c, xi)\n y_hats.append(y_hati)\n\n # <\\computation block>\n\n # create plot for this function\n axs[i].plot( ax, ay, 'k' ) # function in black\n axs[i].plot( ax, y_hats, 'r' ) # interpolating polynomial in red\n axs[i].set_title(Gfun_name)\n\n # increment graph number\n i += 1\n\n plt.show()", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def plot_endpoints( polylines, mymap ):\n map( \\\n lambda start : mymap.addpoint( start[-1][0], start[-1][1], \"#0000FF\") if start != [] else [],\n polylines)", "def show_path_2D(start, end, coordinates, polygons, clear = True):\n global L, N, delta_t\n\n # start interactive mode\n plt.ion()\n\n # crete eempty figure on which data will go and first subplot\n fig = plt.figure()\n\n # get into the correct time step\n for time_step in range(start, end):\n # list of colours used for animation\n colours = cm.rainbow(np.linspace(0, 1, N))\n\n # loop over each particle and colour\n for i in range(N):\n # plot x, y poistion of particle in a given colour and set axis to size of box\n plt.scatter(coordinates[time_step][i][0], coordinates[time_step][i][1], s = 1, color = 'r')\n\n # plot the object\n if i < M:\n polygon = np.array(polygons[time_step][i])\n # get the points of the polygon to plot it\n x, y = polygon.T\n\n # print(x, y)\n\n x = np.append(x, x[0])\n y = np.append(y, y[0])\n\n # print(x, y)\n\n # plot the polygon\n plt.plot(x , y)\n # plt.scatter(polygons_com[time_step][i][0], polygons_com[time_step][i][1], s = 5, color = 'g')\n\n if bound_cond == True:\n plt.axis([0, L, 0, L])\n plt.axis([0, L, 0, L])\n # plt.axis([-L*2, L*2, -L*2, L*2])\n\n # show graph\n plt.show()\n plt.pause(time_pause)\n\n # decide if you want to clear\n if clear == True:\n plt.clf()\n\n return None", "def 
plot_nodes_over_data_1d_components(fig, X, Y, mdl, e_nodes, p_nodes, e_nodes_cov, p_nodes_cov, saveplot = False):\n\n idim = X.shape[1]\n odim = Y.shape[1]\n numplots = idim + odim\n \n for i in range(idim):\n # ax = fig.add_subplot(gs[i,0])\n ax = fig.axes[i]\n ax.clear()\n ax.hist(X[:,i], bins=20)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n yran = ylim[1] - ylim[0]\n offset1 = yran * -0.1\n offset2 = yran * -0.25\n # print(\"offsets 1,2 = %f, %f\" % (offset1, offset2))\n ax.plot(X[:,i], np.ones_like(X[:,i]) * offset1, \"ko\", alpha=0.33)\n for j,node in enumerate(e_nodes[:,i]):\n myms = 2 + 30 * np.sqrt(e_nodes_cov[i,i,i])\n # print(\"node\", j, node, myms)\n ax.plot([node], [offset2], \"ro\", alpha=0.33, markersize=10)\n # ax.plot([node], [offset2], \"r.\", alpha=0.33, markersize = myms)\n # x1, x2 = gmm.\n ax.text(node, offset2, \"n%d\" % j, fontsize=6)\n # plt.plot(e_nodes[:,i], np.zeros_like(e_nodes[:,i]), \"ro\", alpha=0.33, markersize=10)\n \n for i in range(idim, numplots):\n # ax = fig.add_subplot(gs[i,0])\n ax = fig.axes[i]\n ax.clear()\n ax.hist(Y[:,i-idim], bins=20)\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n yran = ylim[1] - ylim[0]\n offset1 = yran * -0.1\n offset2 = yran * -0.25\n # print(\"offsets 1,2 = %f, %f\" % (offset1, offset2))\n ax.plot(Y[:,i-idim], np.ones_like(Y[:,i-idim]) * offset1, \"ko\", alpha=0.33)\n for j,node in enumerate(p_nodes[:,i-idim]):\n myms = 2 + 30 * np.sqrt(p_nodes_cov[i-idim,i-idim,i-idim])\n # print(\"node\", j, node, myms)\n ax.plot([node], [offset2], \"ro\", alpha=0.33, markersize=10)\n # ax.plot([node], [offset2], \"r.\", alpha=0.33, markersize = myms)\n ax.text(node, offset2, \"n%d\" % j, fontsize=6)\n \n # plt.plot(p_nodes[:,i-idim], np.zeros_like(p_nodes[:,i-idim]), \"ro\", alpha=0.33, markersize=10)\n\n plt.draw()\n plt.pause(1e-9)\n \n if saveplot:\n filename = \"plot_nodes_over_data_1d_components_%s.jpg\" % (mdl.__class__.__name__,)\n savefig(fig, filename)\n \n fig.show()\n # plt.show()", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], 
y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", "def draw_data(self, method='linear', number_of_contours=10):\r\n if self.data is not None:\r\n # Coordinates for points to interpolate to\r\n xi, yi = np.mgrid[-1:1:100j, -1:1:100j]\r\n\r\n # Electrode positions for data to interpolate from\r\n points = []\r\n for electrode in self.data.index:\r\n name = TopoPlot.normalize_electrode_name(electrode)\r\n points.append(ELECTRODES[name])\r\n\r\n # Interpolate\r\n # TODO: Will not work with 2 electrodes.\r\n zi = griddata(points, self.data.values, (xi, yi), method=method)\r\n\r\n # Defaults\r\n if number_of_contours is None:\r\n number_of_contours = 10\r\n\r\n # Draw\r\n plt.contourf(xi, yi, zi, number_of_contours)\r\n\r\n # TODO: center\r", "def draw_grid(plt):\n x0, x1, x2, x3 = 0, 3057, 6508, 9860\n y0, y1, y2, y3, y4, y5, y6, y7, y8 = 0, 1535, 2041, 2547, 3053, 3559, 4257, 5303, 6978\n alpha, linewidth = 0.3, 0.5\n\n # Vertical Lines\n plt.plot((x0, x0), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x1), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x2, x2), (y0, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x3, x3), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n\n # Horizontal Lines\n plt.plot((x0, x3), (y0, y0), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y1, y1), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y2, y2), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y3, y3), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y4, y4), 
'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y5, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x1), (y6, y6), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x3), (y7, y7), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y8, y8), 'black', alpha=alpha, linewidth=linewidth)", "def plot_spn(root: Node, f: Union[IO, os.PathLike, str]):\n # Convert the SPN to a NetworkX directed graph\n graph = spn_to_digraph(root)\n\n # Build the dictionaries of node labels and colors\n labels = dict()\n colors = dict()\n for node_id in graph.nodes:\n attr = graph.nodes[node_id]\n name = attr['class']\n if name == Sum.__name__:\n label = '+'\n color = '#083d77'\n for child_id, _ in graph.in_edges(node_id):\n idx = graph.edges[child_id, node_id]['idx']\n graph.edges[child_id, node_id]['weight'] = round(attr['weights'][idx], ndigits=2)\n elif name == Product.__name__:\n label = 'x'\n color = '#bf3100'\n else:\n label = repr(attr['scope']).replace(',', '')\n color = '#542188'\n labels[node_id] = label\n colors[node_id] = color\n\n # Compute the nodes positions using PyDot + Graphviz\n pos = nx_pydot.graphviz_layout(graph, prog='dot')\n pos = {node_id: (x, -y) for node_id, (x, y) in pos.items()}\n pos = rescale_layout_dict(pos)\n\n # Set the figure size\n figdim = np.maximum(2, np.sqrt(graph.number_of_nodes() + 2 * graph.number_of_edges()))\n plt.figure(figsize=(figdim, figdim))\n\n # Draw the nodes and edges\n nx.draw_networkx(\n graph, pos=pos, node_color=[colors[node_id] for node_id in graph.nodes],\n labels=labels, arrows=True, font_size=8, font_color='#ffffff'\n )\n nx.draw_networkx_edge_labels(\n graph, pos=pos, edge_labels=nx.get_edge_attributes(graph, 'weight'),\n rotate=False, font_size=8, font_color='#000000'\n )\n\n # Plot the final figure\n plt.tight_layout()\n plt.axis('off')\n plt.savefig(f, bbox_inches='tight', pad_inches=0)\n plt.clf()", "def plot_obj_func():\n X1 = [i for i in range(-63, 65, 1)]\n Y1 = [8 * math.sin(0.06 * x) + 8 * math.cos(0.14 * x) + 8 * math.exp(math.cos(0.2*x)) for x in X1]\n plt.plot(X1, Y1)\n plt.show()", "def interpolation(x, y, x_new, model=\"InterpolatedUnivariateSpline\", plot=False, title=\"\"):\n\n num_col = y.shape[1]\n\n if isinstance(x_new, float):\n num_row = 1\n else:\n num_row = len(x_new)\n\n y_new = np.zeros((num_row, num_col))\n y_new_dot = np.zeros((num_row, num_col))\n\n if model == \"InterpolatedUnivariateSpline\":\n\n for idx in range(0, num_col):\n\n # TODO: InterpolatedUnivariateSpline seems to have problems with multidimensional arrays\n interpolate_ = interpolate.InterpolatedUnivariateSpline(x, y[:, idx], k=3)\n y_new[:, idx] = interpolate_(x_new)\n # y_new_dot[:,idx] = spline.derivative()(x_new) #TODO: Does this work too?\n y_new_dot[:, idx] = _determine_time_derivate(interpolate_, x_new, 1)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n elif model == \"interp1d\":\n\n # TODO: Extrapolation has to be handled (e.g. 
with fill_value=(0.0,0.0) argument)\n interpolate_ = interpolate.interp1d(x, y, kind=\"cubic\", axis=0, bounds_error=False)\n y_new = interpolate_(x_new)\n y_new_dot = _determine_time_derivate(interpolate_, x_new, num_col)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n elif model == \"lagrange\":\n\n for idx in range(0, num_col):\n\n # Rescaling of data necessary, because Lagrange interpolation is numerical unstable\n xm = np.mean(x)\n ym = np.mean(y[:, idx])\n xs = np.std(x)\n ys = np.std(y[:, idx])\n xscaled = (x - xm) / xs\n yscaled = (y[:, idx] - ym) / ys\n\n # interpolate_ = interpolate.lagrange(xscaled, yscaled) # worst performance\n # interpolate_ = _lagrange2(xscaled, yscaled) # improved performance\n interpolate_ = _lagrange(xscaled, yscaled) # fastest performance\n y_new[:, idx] = interpolate_((x_new - xm) / xs) * ys + ym\n\n # Determine derivate of 'y_new'\n t_offset = 0.5\n y1 = interpolate_((x_new - t_offset - xm) / xs) * ys + ym\n y2 = interpolate_((x_new + t_offset - xm) / xs) * ys + ym\n\n y_new_dot[:, idx] = (y2 - y1) / (t_offset * 2)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n elif model == \"BarycentricInterpolator\":\n\n for idx in range(0, num_col):\n\n # TODO: Is the rescaling of data necessary here?\n xm = np.mean(x)\n ym = np.mean(y[:, idx])\n xs = np.std(x)\n ys = np.std(y[:, idx])\n xscaled = (x - xm) / xs\n yscaled = (y[:, idx] - ym) / ys\n\n interpolate_ = interpolate.BarycentricInterpolator(xscaled, yscaled)\n y_new[:, idx] = interpolate_((x_new - xm) / xs) * ys + ym\n\n # Determine derivate of 'y_new'\n t_offset = 0.5\n y1 = interpolate_((x_new - t_offset - xm) / xs) * ys + ym\n y2 = interpolate_((x_new + t_offset - xm) / xs) * ys + ym\n\n y_new_dot[:, idx] = (y2 - y1) / (t_offset * 2)\n\n if plot == True:\n _plot_interpolation(x, y, x_new, y_new, title=title)\n\n # elif model == 'polyfit':\n\n # #y_new_dot = np.zeros((len(x_new), num_col))\n # degree = 2\n # poly_dot_coeff = np.zeros((degree, num_col))\n #\n\n # #TODO: Polynomial fit over the complete time period of 3 days are not convenient.\n # # Difference of up to 1000 km between original SP3 orbit data and polynomial\n # # fit.\n\n # x_ref = np.amin(x) #TODO: orb_ref_time should be part of 'orb' Dataset\n # x = (x - x_ref) * 86400\n # poly_coeff = poly.polyfit(x, y, degree)\n # y_new = poly.polyval(x_new,poly_coeff)\n\n # for idx in range(0,num_col):\n # poly_dot_coeff[:,idx] = np.polyder(poly_coeff[:,idx])\n # polynomial_dot = np.poly1d(poly_dot_coeff[:,idx])\n # y_new_dot[:,idx] = polynomial_dot((x_new - x_ref) * 86400)\n\n # if plot == True:\n # _plot_orbit_polynomial(sat, poly_coeff, x, y)\n\n return y_new, y_new_dot", "def cubicSpline(x,y,x_int):\n\n #region \"learn\" the coefficients of the cubic polynomials that interpolate intervals in x.\n # amount of intervals/splines\n n = len(x)-1\n\n # a_i = y_i\n a = y[:-1]\n\n # h_i = x_{i+1} - x_i for i in 0..n-1\n h = x[1:]-x[:-1]\n\n # 2 * h_i + h_{i+1}\n diagA = 2*(h[1:]+h[:-1])\n \n # h_1..h_n-2\n hInA = h[1:-1]\n\n A = np.eye(n-1)*diagA\n # distribute h_1..h_n-2 above and underneath the diagonal\n A += np.diag(hInA,1)\n A += np.diag(hInA,-1)\n\n # construct RHS\n z = 3/h[1:] * (y[2:] - y[1:-1]) - 3/h[:-1] * (y[1:-1] - y[:-2])\n\n # c_0 = c_{n} = 0\n c = np.zeros(n+1)\n\n c[1:-1] = np.linalg.solve(A,z)\n \n b = (y[1:]-y[:-1])/h - h/3*(c[1:] + 2*c[:-1])\n\n d = 1/(3*h)*(c[1:]-c[:-1])\n #endregion\n\n #region interpolate all points in x_int\n y_int = x_int.copy()\n # for all 
intervals\n for i in range(len(x)-1):\n # find points to interpolate within given interval\n idx = np.where(np.logical_and(x[i]<= x_int,x_int < x[i+1]))[0]\n xx = x_int[idx]\n yy = np.polyval(np.array([d[i],c[i],b[i],a[i]]), xx-x[i])\n y_int[idx] = yy\n print(f'interpolating in interval [{x[i]},{x[i+1]}[')\n print(xx)\n print(yy)\n print('\\n')\n\n # edgecase where x_int contains exactly last interval border\n #find indicies if x_int contains dupes\n idx = np.where(x_int == x[len(x)-1])[0] \n # interpolate with last interval polynomial\n i = len(a)-1\n y_int[idx] = np.polyval(np.array([d[i],c[i],b[i],a[i]]), x_int[idx]-x[i])\n #endregion\n return y_int", "def _plot_addition_layers(ax, n_voxels, mapper_file, with_curvature,\n with_rois):\n with h5py.File(mapper_file, mode='r') as hf:\n if with_curvature and \"flatmap_curvature\" in hf.keys():\n curvature = load_hdf5_array(mapper_file, key='flatmap_curvature')\n background = np.swapaxes(curvature, 0, 1)[::-1]\n else:\n background = map_voxels_to_flatmap(np.ones(n_voxels), mapper_file)\n ax.imshow(background, aspect='equal', cmap='gray', vmin=0, vmax=1,\n zorder=0)\n\n if with_rois and \"flatmap_rois\" in hf.keys():\n rois = load_hdf5_array(mapper_file, key='flatmap_rois')\n ax.imshow(\n np.swapaxes(rois, 0, 1)[::-1], aspect='equal',\n interpolation='bicubic', zorder=2)", "def make_plot_solved(lx, ly, tour):\n make_plot_original(lx, ly)\n for i in range(1, len(tour)):\n p2 = tour[i]\n p1 = tour[i - 1]\n plt.plot([lx[p1], lx[p2]], [ly[p1], ly[p2]], 'k-')", "def plot_linear(x_range, w, b):\n\tplt.plot(x_range, x_range * w + b)", "def equationPlot(self):\n clf()\n x = np.arange(0,9.9,0.1)\n plot(x,1/(10-x))\n xlabel('X')\n ylabel('1/(10-x)')\n savefig('equation.png')", "def plot_resiliences(nodes, network_vals, er_vals, upa_vals):\n node_vals = range(0, nodes)\n\n plt.plot(node_vals, network_vals, '-b', label='Network')\n plt.plot(node_vals, er_vals, '-r', label='ER')\n plt.plot(node_vals, upa_vals, '-g', label='UPA')\n\n plt.legend(loc='upper right')\n plt.ylabel('Size of Largest Connected Component')\n plt.xlabel('Number of Nodes Removed')\n plt.grid(True)\n plt.title('Comparison of Graph Resilience\\nMeasured by Largest Connected Component vs Nodes Removed by Target Attack\\n')\n plt.show()", "def plot_interpolated_cog(self, teff, logg, vt, ews=None):\n if not self.interpolated:\n raise ValueError(\"Your model hasn't been interpolated yet\")\n import matplotlib.pyplot as plt\n if not ews:\n ews = np.arange(1, 100, 0.1)\n generated_mets = [self.load_model().predict([[teff, logg, vt, ew]])[0] for ew in ews]\n fig = plt.plot(ews, generated_mets)\n plt.xlabel(\"EW(m$\\AA$)\")\n plt.ylabel(\"A(Fe)\")\n return fig", "def plot_iteration(ax, x_n, y_n, f, \n max_labels=6, resfct = 100, include_chords=True,\n left_extra=0.01, right_extra=0.01):\n if include_chords:\n # create a list including chord points:\n x_c = sum([[x, x] for x in x_n[1:]], [x_n[0]])\n y_c = sum([[0, y] for y in y_n[1:]], [y_n[0]])\n else:\n x_c = x_n\n y_c = y_n\n # the iteration results\n ax.scatter(x_c, y_c, marker='x', color='red', s=30)\n\n # the convergence pattern\n ax.plot(x_c, y_c, color='green', ls='--')\n\n # add some labels\n # figure out a reasonable offset for labels\n dxt = (np.max(x_n)-np.min(x_n))/50.\n dyt = (np.max(y_n)-np.min(y_n))/50.\n # only plot a maximum of max_labels labels, so plot doesn't get too messy\n for i,(x,y) in enumerate(zip(x_n, y_n)):\n ax.text(x_n[i]+dxt, y_n[i]+dyt, '$x_{}$'.format(i), fontsize=16)\n if i == max_labels:\n break\n\n # the 
function\n x = np.linspace(np.min(x_n) - left_extra, np.max(x_n) + right_extra, resfct)\n ax.plot(x, f(x), 'b', label='$F(x)$')\n\n # zero line\n xlim = ax.get_xlim()\n ax.plot([xlim[0], xlim[1]], [0., 0.], 'k--')\n # add ticks for the x_n\n for x in x_n:\n ax.plot([x, x], [-dyt, dyt], 'k')\n ax.set_xlim(xlim)\n ax.set_xlabel('$x$', fontsize=16)\n ax.set_ylabel('$y=F(x)$', fontsize=16)", "def plot_inverse(self,show=True,nx=129):\n xs = np.linspace(0.0,1.0,nx)\n plt.plot(xs,np.vectorize(self.if0)(xs),'b-')\n plt.plot([0.0,1.0],[self.rho_max,self.rho_max],'b:')\n plt.plot(xs,np.vectorize(self.if1)(xs),'r-')\n plt.plot([0.0,1.0],[self.rho_min,self.rho_min],'r:')\n plt.plot([0.0,1.0],[0.0,1.0],'k--')\n #plt.plot([0.0,1.0],[self.rho,self.rho],'g:')\n plt.xlim(0.0,1.0)\n plt.ylim(0.0,1.0)\n plt.axes().set_aspect(1.0)\n if show:\n plt.show()", "def test_poly_plot(self):\n clf()\n filename = 'poly_plot.png'\n t1 = Polygon([(0, 0), (1, 0), (1, 1)])\n t2 = Polygon([(1, 0), (2, 0), (2, 1)])\n polys = GeoSeries([t1, t2])\n ax = polys.plot()\n self._compare_images(ax=ax, filename=filename)", "def interpPlot(self):\n self.expInt, a = self.interpData(None, self.expData)\n self.simInt, b = self.interpData(self.optimSim)\n self.residual = abs(self.expInt.data - self.simInt.data)\n\n plt.figure()\n self.sasPlot(self.expInt, sim=self.simInt.data, resid=self.residual)\n\n return", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def xyPlot(xPlotFunc\n\t\t\t,yPlotFunc\n\t\t\t,table\n\t\t\t,filterList\n\t\t\t,ax\n\t\t\t,legendLabel=None\n\t\t\t,labelFunc=None\n\t\t\t,title=None\n\t\t\t,commonConstraints=[completed]\n\t\t\t,codeList=['ro--','gx--','b^--','ms--','y*--','ko--','co--','ro:','gx:','b^:','ms:','y*:','ko:','co:','ro-','gx-','b^-','ms-','y*-','ko-','co-']):\n\txys=[]\n\tfor i,constraintList in enumerate(filterList):\t\n\t\txs = [xPlotFunc.func(*x) for x in plotQuery(table,xPlotFunc.cols,constraintList+commonConstraints)]\n\t\tys = [yPlotFunc.func(*y) for y in plotQuery(table,yPlotFunc.cols,constraintList+commonConstraints)]\n\t\tif labelFunc is not None: \n\t\t\tlabel = [labelFunc.func(*l) for l in plotQuery(table,labelFunc.cols,constraintList+commonConstraints)]\n\t\ttry: \n\t\t\txy= sorted(zip(xs,ys)) #order the pairs\n\t\t\tx,y = zip(*xy)\n\t\t\tax.plot(x,y,codeList[i%len(codeList)],label='' if legendLabel is None else legendLabel[i])\n\t\t\tif labelFunc is not None: \n\t\t\t\tfor i in range(len(x)):\tax.annotate(label[i],xy=(x[i],y[i]),fontsize=9)\n\t\texcept ValueError: print \"Warning, no data found for constraint #\"+str(i+1)\n\t\txys.append(xy)\n\tif title is not None: ax.set_title(title)\n\n\tax.set_xlabel(xPlotFunc.axisLabel)\n\tax.set_ylabel(yPlotFunc.axisLabel)\n\t\n\tif legendLabel is not None: \n\t\tlegend = ax.legend(loc='best', shadow=True)\n\t\tlegend.get_frame().set_facecolor('#00FFCC')\n\t\tlegend.draggable()\n\treturn xys", "def update_plot():\n pass", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def plot_2d_topomap_inter(ax):\n\n # plot first Head \n N = 300 # number of points for interpolation\n xy_center = [-0.178,0] # center of the plot\n radius = 0.1 # radius\n\n # draw a circle\n circle = matplotlib.patches.Circle(xy = xy_center, radius = radius, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n # add some body 
parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [-0.19,0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [-0.19,-0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n xy = [[-0.087,-0.027],[-0.087,0.027], [-0.068,0]]\n polygon = matplotlib.patches.Polygon(xy = xy, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon) \n \n\n # Plot second Head \n x2y2_center = [0.178,0] # center of the plot\n radius2 = 0.1 # radius\n \n # draw a circle\n circle = matplotlib.patches.Circle(xy = x2y2_center, radius = radius2, edgecolor = \"k\", facecolor = \"w\")\n ax.add_patch(circle)\n \n # make the axis invisible \n for loc, spine in ax.spines.items():\n spine.set_linewidth(0)\n \n # remove the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n \n ## add some body parts. Hide unwanted parts by setting the zorder low\n # add two ears\n circle = matplotlib.patches.Ellipse(xy = [0.19,0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n circle = matplotlib.patches.Ellipse(xy = [0.19,-0.095], width = 0.05, height = 0.025, angle = 0, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(circle)\n ## add a nose\n x2y2 = [[0.087,-0.027],[0.087,0.027], [0.068,0]]\n polygon = matplotlib.patches.Polygon(xy = x2y2, edgecolor = \"k\", facecolor = \"w\", zorder = 0)\n ax.add_patch(polygon)", "def plot(self):\n pass", "def ice_plot(curves, **kwargs):\n \n n,m = curves.shape\n \n #grid of plots\n gridspec_kw = {'height_ratios':[3, 1], \n 'wspace':0.0, \n 'hspace':0.0}\n fig, (ax_curves, ax_feature) = plt.subplots(2,1, \n gridspec_kw=gridspec_kw, \n figsize=kwargs.get('figsize', (6,6)))\n \n # top graph - curves\n for curve in curves:\n ax_curves.plot(np.arange(m), curve)\n ax_curves.set_xticklabels([])\n ax_curves.set_xlim((0, m))\n ax_curves.set_ylabel(kwargs.get('ylabel', 'decision function $\\Delta$'))\n \n # bottom graph - feature values\n if 'feature_values' in kwargs:\n ax_feature.plot(np.arange(m), kwargs.get('feature_values'))\n ax_feature.set_xlim((0, m)) \n ax_feature.set_xlabel(kwargs.get('xlabel', '$X_S$ values'))\n \n return fig, (ax_curves, ax_feature)", "def prob4():\n domain = np.linspace(-1, 1, 400)\n n_array = [2**i for i in range(2,9)]\n f = lambda x: 1 / (1 + 25*x**2)\n plt.ion()\n poly_error = []\n cheby_error = []\n for n in n_array:\n x = np.linspace(-1, 1, n)\n poly = BarycentricInterpolator(x)\n poly.set_yi(f(x))\n poly_error.append(la.norm(f(domain) - poly(domain), ord=np.inf))\n y = np.array([(1/2)*(2*np.cos(j*np.pi/n)) for j in range(n+1)])\n cheby = BarycentricInterpolator(y)\n cheby.set_yi(f(y))\n cheby_error.append(la.norm(f(domain) - cheby(domain), ord=np.inf))\n plt.loglog(n_array, poly_error, label=\"equally spaced points\", basex=2)\n plt.loglog(n_array, cheby_error, label=\"Chebyshev extremal points\", basex=2)\n plt.legend()\n plt.show()", "def n_ord_interp(x, y, deg=5):\n return np.polyfit(x, y, deg)", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def plot_activity(opts, points, activity, labels, plot_state=False):\n sort_ix = sort_weights(opts)\n activity[:,opts.state_size:] = activity[:,opts.state_size+sort_ix]\n\n x = np.arange(0, 
opts.state_size)\n # x = np.linspace(np.amin(points[:, 0]), np.amax(points[:, 0]))\n scale = 2 * np.pi / opts.state_size\n x_rad = x * scale\n cos, sin = np.cos(x_rad), np.sin(x_rad)\n if opts.velocity:\n y = np.linspace(np.amin(points[:, 1]), np.amax(points[:, 1]))\n else:\n y = np.zeros(1)\n\n x_mesh, y_mesh = np.meshgrid(x, y)\n cos, _ = np.meshgrid(cos, y)\n sin, _ = np.meshgrid(sin, y)\n if plot_state:\n nc, nr = 5, 4\n neurons = np.arange(opts.state_size) # state neurons\n else:\n nc, nr = 5, 8\n neurons = np.arange(opts.state_size, opts.rnn_size) # extra neurons\n\n\n f_linear, ax_linear = plt.subplots(ncols=nc, nrows=nr)\n # plt.suptitle('Linear Interpolated Data')\n\n c, r = 0, 0\n for i, n in enumerate(neurons):\n z_lin = griddata(points[:, :2], activity[:, n], (x_mesh, y_mesh), method='linear')\n plt.sca(ax_linear[r, c])\n # plt.title('Neuron {}'.format(n))\n plt.contourf(x, y, z_lin, cmap='RdBu_r')\n plt.axis('off')\n\n # find the global centroid\n if np.nanmax(z_lin) <= 0:\n z_lin -= np.nanmean(z_lin) # center activations at the median\n\n z_lin[np.isnan(z_lin)] = 0\n z_lin[z_lin < 0] = 0\n norm = np.sum(z_lin)\n\n cos_mean = np.sum(cos * z_lin) / norm\n sin_mean = np.sum(sin * z_lin) / norm\n com_rad = np.arctan2(sin_mean, cos_mean)\n com_x = (com_rad / scale) % 20\n com_y = np.sum(y_mesh * z_lin) / norm\n # plt.scatter(com_x, com_y, c='k')\n\n c += 1\n if c == nc:\n c = 0\n r += 1\n if r == nr:\n break\n # plt.tight_layout()\n plt.show()", "def plot_graph(costs):\n plt.figure()\n for i in range(len(np.array(costs).T)):\n plt.plot(np.array(costs)[:, i])\n plt.title(\"Costs\")\n plt.show()", "def plotFeatures(self):\n fl=np.array(self.xp)*0.0+0.25*self.farr.max()\n self.splines=self.axes.plot(self.xp, fl , ls='', marker='|', ms=20, color='#00FF00')\n #set up the text position\n tsize=0.83\n self.ymin, self.ymax = self.axes.get_ylim()\n ppp=(self.ymax-self.ymin)/(self.arcfigure.figure.get_figheight()*self.arcfigure.figure.get_dpi())\n f=self.ymax-10*tsize*ppp\n for x,w in zip(self.xp, self.wp):\n w='%6.2f' % float(w)\n self.axes.text(x, f, w, size='small', rotation='vertical', color='#00FF00')", "def plot_kinetics_eo(k_data, i_data, eo_data, tlim=None, xlim=None, lb=10, mpp=0.33, seg_length=100, fps=10, plot=True):\n \n t = [] \n power = []\n \n # apply tlim\n if tlim == None:\n pass\n elif isinstance(tlim, int):\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ tc < tlim]\n i_data = i_data.loc[i_data.t / fps < tlim]\n eo_data = eo_data.loc[eo_data.t < tlim]\n elif isinstance(tlim, list) and len(tlim) == 2:\n assert(tlim[1]>tlim[0])\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ (tc < tlim[1]) & (tc >= tlim[0])]\n i_data = i_data.loc[(i_data.t / fps < tlim[1]) & (i_data.t / fps >= tlim[0])]\n eo_data = eo_data.loc[(eo_data.t < tlim[1]) & (eo_data.t >= tlim[0])]\n else:\n raise ValueError('tlim should be None, int or list of 2 int') \n \n # compute exponents at different time\n # t, power will be plotted on ax1\n for idx in k_data.segment.drop_duplicates():\n subdata = k_data.loc[k_data.segment==idx]\n xx, yy = postprocess_gnf(subdata, lb, xlim=xlim, sparse=3)\n x = np.log(xx)\n y = np.log(yy)\n p = np.polyfit(x, y, deg=1)\n t.append((idx-1)*seg_length/fps)\n power.append(p[0])\n\n # rescale light intensity to (0, 1)\n # t1, i will be plotted on ax2\n t1 = i_data.t / fps\n i = i_data.intensity - i_data.intensity.min()\n i = i / i.max()\n # t2, E will be plotted on ax3\n t2 = eo_data.t\n E = eo_data.E * mpp * mpp\n # t2, O will be plotted on ax4\n O 
= eo_data.OP\n \n data = {'t0': t, 'alpha': power, 't1': t1, 'i': i, 't2': t2, 'E': E, 'OP': O}\n \n if plot == True:\n # set up fig and ax\n fig = plt.figure()\n ax1 = fig.add_axes([0,0,1,1])\n ax2 = ax1.twinx()\n ax3 = ax1.twinx()\n ax4 = ax1.twinx()\n\n # plot t, power\n color = 'black'\n ax1.set_xlabel('$t$ [s]')\n ax1.set_ylabel('$\\\\alpha$', color=color)\n ax1.plot(t, power, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n # plot t1, intensity\n color = wowcolor(0)\n ax2.set_ylabel('$I$', color=color)\n ax2.plot(t1, i, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n # plot t2, E\n color = wowcolor(2)\n ax3.set_ylabel('$E$ [$\\mu$m$^2$/s$^2$]', color=color)\n ax3.plot(t2, E, color=color)\n ax3.tick_params(axis='y', labelcolor=color)\n ax3.spines[\"right\"].set_position((\"axes\", 1.1))\n\n # plot t2, O\n color = wowcolor(8)\n ax4.set_ylabel('$OP$', color=color)\n ax4.plot(t2, O, color=color)\n ax4.tick_params(axis='y', labelcolor=color)\n ax4.spines[\"right\"].set_position((\"axes\", 1.2))\n\n ax = [ax1, ax2, ax3, ax4] \n return data, fig, ax\n else:\n return data", "def EPI():\n TE = np.array([4.22, 33.81, 63.39, 92.98, 122.6, 152.2, 181.7, 211.3, 240.9, 270.5])\n upper_left = np.array([697.3, 367.0, 217.5, 115.8, 51.8, 23.2, 14.8, 8.7, 6.1, 4.6])\n center = np.array([1110.2, 907.8, 813.6, 745.2, 692.8, 637.0, 564.9, 521.0, 450.2, 401.6])\n lower_right = np.array([723.0, 419.2, 224.1, 126.4, 61.8, 32.4, 15.1, 8.8, 3.9, 3.8])\n upper_center = np.array([782.2, 499.4, 279.5, 154.5, 88.6, 58.2, 43.8, 38.2, 38.2, 36.0])\n\n area = [upper_left, center, upper_center, lower_right]\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\"]\n name = [\"Upper left area\", \"Center area\", \"Up center area\", \"Lower right area\"]\n x_new = np.linspace(4.22, 270.5, 10000)\n for i, j, k in zip(area, colors, name):\n popt, _ = curve_fit(M_xy, TE, i, p0=np.array([200, 300]))\n M0, T2 = popt[0], popt[1]\n y_new = M_xy(x_new, M0, T2)\n plt.scatter(TE, i)\n plt.plot(x_new, y_new, \"--\", c=j, label=\"Fit: %s\" % k + f\", $T_2$={T2:.2f}\")\n plt.legend(loc=\"best\")\n plt.grid()\n plt.ylabel(\"Mean Signal Intensity\")\n plt.xlabel(\"TE [ms]\")\n plt.show()", "def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()", "def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n 
inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()", "def see_what_its_doing_1d():\n all_points = create_points_with_random_pollution_1d(100, 100, 10)\n picked_points = pick_uniform_random_points(all_points, 20)\n interpolated_points = interpolate_unknown_points(picked_points, all_points)\n\n picked_x = []\n picked_pollution = []\n for label, point in picked_points.items():\n picked_x.append(label)\n picked_pollution.append(point.get_pollution_value())\n\n interp_x = []\n inter_pollution = []\n\n for label, point in interpolated_points.items():\n if not label in picked_x:\n interp_x.append(label)\n inter_pollution.append(point.get_pollution_value())\n\n plt.plot(picked_x, picked_pollution, \"ro\", interp_x, inter_pollution, \"go\")\n plt.xlabel(\"Point Label\")\n plt.ylabel(\"Pollution Value\")\n plt.show()", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def draw_lists_pyplot(y_array, line_weight=3, learnig_rate=1):\n y = y_array\n plt.plot(y, lw=line_weight, label='cost(a={:})'.format(learnig_rate))\n plt.legend()\n\n plt.title(\"Gradient Descent Optimizing Method\\nminimize cost function\")\n plt.xlabel('time-itoration')\n plt.ylabel('cost-function')\n\n plt.xlim(0,)\n plt.ylim(0,)\n\n plt.grid(b=None, which='major', axis='both')\n plt.show()", "def changePlotter(graph, contribution, rounds, args, titles=None, y_labels=None, cols=5):\n sizes = [graph.degree(i)*10 for i in range(graph.order())]\n if not titles:\n titles = [f\"Round: {i}\" for i in rounds]\n\n # Dynamics subplot to show developement of the network\n N = len(rounds)\n cols = cols\n rows = N // cols + 1 if N % cols else N // cols\n #print(N, cols, rows)\n\n gs = gridspec.GridSpec(rows, cols)\n fig = plt.figure(figsize=(1.2*rows*6, 1.2*cols))\n for i in range(N):\n ax = fig.add_subplot(gs[i])\n ax.set_title(titles[i])\n \n colors = contribution[rounds[i],:]\n if args.network == \"WS\":\n nx.draw_circular(graph, with_labels=False, ax=ax, node_size=sizes, node_color=colors, vmin=0, vmax=100)\n elif args.network == \"BA\":\n 
nx.draw_kamada_kawai(graph, with_labels=False, ax=ax, node_size=sizes, node_color=colors, vmin=0, vmax=100)\n\n if y_labels:\n # Custom y labels\n ax.set_axis_on()\n if i%cols==0:\n ax.set_ylabel(y_labels[i//cols])\n ax.tick_params(\n axis='x',\n which='both',\n bottom=False,\n top=False,\n labelbottom=False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n\n \n return plt", "def draw_lists_pyplot(y_array, line_weight=3, learnig_rate=1):\n y = y_array\n plt.plot(y, lw=line_weight, label='cost(a={:})'.format(learnig_rate))\n plt.legend()\n\n plt.title(\"Gradient Descent Optimizing Method\\nminimize cost function\")\n plt.xlabel('time-itoration')\n plt.ylabel('cost-function')\n plt.xlim(0,)\n plt.ylim(0,)\n\n plt.grid(b=None, which='major', axis='both')\n plt.show()", "def plot_params(self,sel='all',niter=None) :\n\t\talpha = np.array([param['alpha'] for param in self.params])\n\t\tbeta = np.array([param['beta'] for param in self.params])\n\t\tg = np.array([param['g'] for param in self.params])\n\t\th = np.array([param['h'] for param in self.params])\n\t\tpdict = {\n\t\t\t'alpha' : alpha,\n\t\t\t'beta' : beta,\n\t\t\t'g' : g,\n\t\t\t'h' : h\n\t\t}\t\n\n\t\t# Plot true params\n\t\tif sel == 'all' : \n\t\t\tfor key in pdict.keys() : \n\t\t\t\tkey_t = self.ar.ss._ss_params[key][self.eqn-1]\n\t\t\t\tif type(key_t) is list : \n\t\t\t\t\tfor t in key_t : \n\t\t\t\t\t\tpl.axhline(y=t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\t\t\t\telse : \n\t\t\t\t\tpl.axhline(y=key_t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\t\telse : \n\t\t\tkey_t = self.ar.ss._ss_params[sel][self.eqn-1]\n\t\t\tif type(key_t) is list : \n\t\t\t\tfor t in key_t : \n\t\t\t\t\tpl.axhline(y=t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\t\t\telse : \n\t\t\t\tpl.axhline(y=key_t,color=\"black\",linewidth=2,\\\n\t\t\t\t\t\tlinestyle=\"dashed\")\n\n\n\t\t# Plot estimated params\n\t\tif sel == 'all' : \n\t\t\tfor key in pdict.keys() : \n\t\t\t\tpl.plot(pdict[key],label=key)\n\t\t\tpl.legend()\n\t\t\tpl.xlabel(\"Iteration\")\n\t\t\tpl.ylabel(\"Params\")\n\t\telse : \n\t\t\tpl.plot(pdict[sel],label=sel)\n\t\t\tpl.legend()\n\t\t\tpl.xlabel(\"Iteration\")\n\t\t\tpl.ylabel(sel)\n\t\tpl.show()", "def plot(self):\n\t\tself.plotOfXray().plot()", "def Interpolate(ax, ay, x, npoints):\r\n\r\n assert(ax[1]>ax[0]) # test for ascending order, at least for first point\r\n \r\n if (verbose): \r\n print 'interpolate/extrapolate to x=',x,', npoints=',npoints\r\n\r\n # Find best data points to use, based on which are closest to \r\n # requested point x. 
Will find <npoints> (or fewer) best data points and \r\n # return as an array.\r\n ibest = FindBest(ax,x,npoints)\r\n npoints = len(ibest) # make sure npoints is updated in case was reduced\r\n if (verbose): \r\n print 'ibest',ibest\r\n\r\n # Build the polynomial y(x), evaluated at the point x.\r\n y = 0.0\r\n for i in range(npoints): # do i=0,npoints-1\r\n li = 1.0\r\n ni = ibest[i] # index to ith best point\r\n # build up li[x] term, evaluated at the point x\r\n for j in range(npoints): # do j=0,npoints-1\r\n if (i != j): # exclude j=i term\r\n nj = ibest[j] # index to jth best point\r\n li = li*(x-ax[nj])/(ax[ni]-ax[nj])\r\n y = y+ay[ni]*li\r\n \r\n return y", "def _draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)", "def plot(self,ax,**kwargs):\n self.XP_Plotter.plot(ax,**kwargs)\n self.lines_theory[0], = ax.plot(self.xx, self.pp_non_rel,'--g',**kwargs)\n self.lines_theory[1], = ax.plot(self.xx, self.pp_rel,'--m',**kwargs)\n self.lines_theory[2], = ax.plot(self.xx_itpl, self.pp_itpl,'-r',**kwargs)" ]
[ "0.61234254", "0.5854555", "0.58188057", "0.5757321", "0.57179767", "0.5697689", "0.5670117", "0.56530726", "0.56499636", "0.5633391", "0.56109315", "0.56016254", "0.5506603", "0.5506603", "0.5494239", "0.5491141", "0.5464488", "0.54549974", "0.5449367", "0.5433634", "0.5379167", "0.5357248", "0.5311231", "0.52913535", "0.52781713", "0.5265587", "0.524423", "0.52378047", "0.5233742", "0.522808", "0.5207182", "0.5207113", "0.51999825", "0.51871216", "0.51866496", "0.518426", "0.5183842", "0.51763684", "0.51708066", "0.51687497", "0.51582474", "0.51438403", "0.5143542", "0.5111256", "0.5103801", "0.50990057", "0.50901234", "0.5089568", "0.506782", "0.5067609", "0.50672626", "0.5048831", "0.5041659", "0.5040778", "0.50402766", "0.50292885", "0.50226015", "0.5018432", "0.5017327", "0.5012406", "0.50051826", "0.5004681", "0.5001928", "0.4998199", "0.49976835", "0.4984884", "0.4984525", "0.49819428", "0.49810442", "0.49787122", "0.4963254", "0.49580917", "0.4956682", "0.49544936", "0.49526304", "0.49522263", "0.4952054", "0.49489096", "0.49474975", "0.49473605", "0.49432537", "0.4941063", "0.49400645", "0.49390024", "0.4937093", "0.49334246", "0.49326748", "0.49326748", "0.49326748", "0.49305108", "0.49288332", "0.4928293", "0.49278963", "0.49270025", "0.49242288", "0.49220133", "0.49127772", "0.4911173", "0.49103642", "0.49094045" ]
0.7956468
0
This method runs a process and logs the output to both a log file and stdout
def _subprocess_check_log_output(cmd, cwd, logfile): _LOG.info("Execute (%s): %s", cwd, cmd) cmd_base = cmd[0] if isinstance(cmd, (list, tuple)) else cmd.split(" ", 1)[0] proc = subprocess.Popen( cmd, cwd=cwd, shell=True, bufsize=0, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8", ) stdout = "" with open(logfile, "a") as f: msg = ( "\n" + "-" * 80 + f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}: Execute ({cwd}): {cmd}\n" + "-" * 80 ) f.write(msg) stdout += msg + "\n" while True: data = proc.stdout.readline() stdout += data _LOG.debug("%s: %s", cmd_base, data.rstrip("\n")) f.write(data) # process is done if there is no data and the result is valid if not data: # EOF break proc.wait() if proc.returncode != 0: raise RuntimeError(f"Subprocess failed: {cmd}\nstdout:\n{stdout}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run(self, args, cwd=None, env=None, logmode='wb'):\n args = tuple(str(arg) for arg in args)\n if self.process and self.process.poll() is None: \n raise RuntimeError(\"A process is already running\")\n if self.logfile:\n self.logfile.close()\n self.logfile = open(self.logfilename, logmode, buffering=0)\n if env is not None:\n env = dict(os.environ, **env, \n PYTHONPATH=os.pathsep.join(sys.path))\n \n self.process = subprocess.Popen(args, cwd=cwd, stdout=self.logfile,\n stderr=subprocess.STDOUT, env=env)", "def run_process(cmd, out_log=None, err_log=None):\r\n return run_multi_processes([cmd], out_log=out_log, err_log=err_log)", "def run(self):\n self._config_log()\n # Ugly patch to avoid pyvisa complaining about missing filters\n warnings.simplefilter(\"ignore\")\n\n # Redirecting stdout and stderr to the logging system.\n logger = logging.getLogger()\n redir_stdout = StreamToLogRedirector(logger)\n sys.stdout = redir_stdout\n redir_stderr = StreamToLogRedirector(logger, 'stderr')\n sys.stderr = redir_stderr\n logger.info('Logger parametrised')\n\n logger.info('Process running')\n self.pipe.send('READY')\n while not self.process_stop.is_set():\n\n # Prevent us from crash if the pipe is closed at the wrong moment.\n try:\n\n # Wait for a measurement.\n while not self.pipe.poll(2):\n if self.process_stop.is_set():\n break\n\n if self.process_stop.is_set():\n break\n\n # Get the measure.\n name, config, build, runtime, mon_entries = self.pipe.recv()\n\n # Build it by using the given build dependencies.\n root = build_task_from_config(config, build, True)\n\n # Give all runtime dependencies to the root task.\n root.run_time = runtime\n\n logger.info('Task built')\n\n # There are entries in the database we are supposed to\n # monitor start a spy to do it.\n if mon_entries:\n spy = MeasureSpy(\n self.monitor_queue, mon_entries,\n root.task_database)\n\n # Set up the logger for this specific measurement.\n if self.meas_log_handler is not None:\n logger.removeHandler(self.meas_log_handler)\n self.meas_log_handler.close()\n self.meas_log_handler = None\n\n log_path = os.path.join(\n root.get_from_database('default_path'),\n name + '.log')\n if os.path.isfile(log_path):\n os.remove(log_path)\n self.meas_log_handler = RotatingFileHandler(log_path,\n mode='w',\n maxBytes=10**6,\n backupCount=10)\n aux = '%(asctime)s | %(levelname)s | %(message)s'\n formatter = logging.Formatter(aux)\n self.meas_log_handler.setFormatter(formatter)\n logger.addHandler(self.meas_log_handler)\n\n # Pass the events signaling the task it should stop or pause\n # to the task and make the database ready.\n root.should_pause = self.task_pause\n root.paused = self.task_paused\n root.should_stop = self.task_stop\n root.task_database.prepare_for_running()\n\n # Perform the checks.\n check, errors = root.check(test_instr=True)\n\n # They pass perform the measure.\n if check:\n logger.info('Check successful')\n root.perform_(root)\n result = ['', '', '']\n if self.task_stop.is_set():\n result[0] = 'INTERRUPTED'\n result[2] = 'Measure {} was stopped'.format(name)\n else:\n result[0] = 'COMPLETED'\n result[2] = 'Measure {} succeeded'.format(name)\n\n if self.process_stop.is_set():\n result[1] = 'STOPPING'\n else:\n result[1] = 'READY'\n\n self.pipe.send(tuple(result))\n\n # They fail, mark the measure as failed and go on.\n else:\n mes = 'Tests failed, see log for full records.'\n self.pipe.send(('FAILED', 'READY', mes))\n\n # Log the tests that failed.\n fails = errors.iteritems()\n message = '\\n'.join('{} : {}'.format(path, 
mes)\n for path, mes in fails)\n logger.critical(message)\n\n # If a spy was started kill it\n if mon_entries:\n spy.close()\n del spy\n\n except IOError:\n pass\n\n # Clean up before closing.\n logger.info('Process shuting down')\n if self.meas_log_handler:\n self.meas_log_handler.close()\n self.log_queue.put_nowait(None)\n self.monitor_queue.put_nowait((None, None))\n self.pipe.close()", "def run(command, params={}):\n pass_params = {'stdout': PIPE, 'stderr': PIPE}\n pass_params.update(params)\n process = Popen(command, **pass_params)\n System.log_subprocess_output(process)", "def run(self):\n self.process.start()", "def run(self, path='', log_console_output=True):\n # os.system('mkdir -p LOGS/'+self.name)\n self.write_inlist()\n \n if log_console_output:\n # Put console output into separate file\n os.system('.'+path+'/rn > LOGS/'+self.name+'.out')\n else:\n os.system('.'+path+'/rn')\n os.system('echo '+self.name+' >> track_finished')", "def _run_process(self, command_string, timeout=None):\n args = shlex.split(command_string)\n with open(self.logger.log_debug_err, 'ab') as stderr, open(self.logger.log_debug_out, 'ab') as stdout:\n try:\n subprocess.check_call(args, stdout=stdout, stderr=stderr, timeout=timeout)\n except (subprocess.TimeoutExpired, subprocess.CalledProcessError):\n return\n return True", "def main():\n args = parse_args()\n level = logging.INFO\n if args.quiet:\n level = logging.ERROR\n if args.debug:\n level = logging.DEBUG\n LOG.setLevel(level)\n\n log_handler = logging.handlers.RotatingFileHandler(\n args.logfile, maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACKUP_COUNT)\n OUTPUT.addHandler(log_handler)\n log_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt=DATE_FORMAT))\n\n prefix, ext = os.path.splitext(args.logfile)\n error_log_handler = logging.handlers.RotatingFileHandler(\n prefix + '-error' + ext, maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACKUP_COUNT)\n error_log_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt=DATE_FORMAT))\n error_log_handler.setLevel(logging.WARNING)\n LOG.addHandler(error_log_handler)\n\n processors = []\n LOG.debug(\"Initializing processor objects\")\n for host in args.hosts:\n try:\n processors.append(Processor(host))\n except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as error:\n LOG.warning(\"Ignoring %s due to %s\", host, error)\n continue\n if len(args.hosts) != len(processors):\n LOG.error(\"%d host(s) will be ignored for this session.\", len(args.hosts) - len(processors))\n\n OUTPUT.info(\"(Process started)\")\n while True:\n try:\n for proc in processors:\n proc.sample()\n proc.compare()\n except (KeyboardInterrupt, SystemExit) as error:\n OUTPUT.info(\"(Process exiting)\")\n sys.exit(0)\n LOG.debug(\"Sleeping %s %0.1f\", args.interval)\n time.sleep(args.interval)", "def run(path):\n\n cwd = os.getcwd()\n\n os.chdir(path)\n\n if platform.system == \"Windows\":\n os.popen(\"run.bat >> log-\" +\n datetime.datetime.now().isoformat().split(\".\")[0].replace(':', '-') + \".txt\")\n else:\n os.popen(\"./run >> log-\" +\n datetime.datetime.now().isoformat().split(\".\")[0].replace(':', '-') + \".txt\")\n\n while is_alive() == False:\n time.sleep(0.1)\n\n os.chdir(cwd)", "def run(self, stdout=None, stderr=None):", "def _logging_subprocess(self):\n\n # Setup logging for logging subprocess\n setproctitle('flowbber - logging manager')\n\n # # Level\n level = self.LEVELS.get(self._verbosity, logging.DEBUG)\n\n # # Format\n if level != logging.DEBUG:\n 
format_tpl = self.FORMAT\n else:\n format_tpl = self.FORMAT_DEBUG\n formatter = ColoredFormatter(fmt=format_tpl, style='{')\n\n # # Handler\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n # # Configure baisc logging\n logging.basicConfig(handlers=[handler], level=level)\n\n # Start listening for logs and prints\n listener = QueueListener(self._log_queue, handler)\n listener.start()", "def run(self):\n # Daemonize\n self.daemonize()\n\n # Create helper sink\n logger.add(\n Config.getpath('log.file'),\n level=Config.get('log.level'), colorize=True, enqueue=True,\n format='<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> |'\n '<yellow>{process.name: <23}</yellow> | '\n '<level>{message}</level> (<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan>)',\n rotation=timedelta(days=1), retention=timedelta(days=30), compression='gz')\n\n # Set process title\n self.setprocname()\n\n # Set signal handlers\n # self.sigreg(signal.SIGHUP, self._reload)\n self.sigreg(signal.SIGINT, self.sighandler)\n self.sigreg(signal.SIGTERM, self.sighandler)\n\n # Write PID file\n with open(Config.getpath('pidfile'), 'w+') as pidfile:\n pidfile.write(str(os.getpid()))\n\n # Load children processes\n self._children = self._loadchildren()\n\n # While not stopping\n while self._stop is False:\n # Monit instances\n self._monit()\n\n time.sleep(1)\n\n logger.debug('Terminating...')\n\n # Stop all children whose instance is not None\n children = [proc for _, _, proc in self._children if proc]\n\n # While children have not stopped\n while children:\n\n for index, proc in enumerate(children):\n\n logger.debug(f'Terminating child: {proc.name} with pid {proc.pid}...')\n\n # Send SIGTERM to child process\n os.kill(proc.pid, signal.SIGINT if isinstance(proc, PingAPI) else signal.SIGTERM)\n\n # On join fail, SIGKILL child process\n proc.join(timeout=1)\n\n # If child has not stopped, give it time\n if proc.is_alive() or proc.exitcode is None:\n continue\n\n # Remove children\n children.pop(index)\n\n # Remove pidfile and socket\n with contextlib.suppress(FileNotFoundError):\n os.unlink(Config.getpath('pidfile'))", "def run(self):\n logger.info(\"Running...\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\r\n self.log(texto=f\"Executando {self._name}\")", "def run(self):\n logging.debug(\"Executing: {0!r}...\".format(self.command_str))\n\n self.process = subprocess.Popen(\n self.command_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n start = datetime.now()\n result = self.process.communicate()\n end = datetime.now()\n self.time = end - start\n\n if self.verbose:\n stdout, stderr = result\n message = [\"Output:\\n\" \"- returncode:\\n{0}\".format(self.process.returncode)]\n if stdout:\n message.append(\"- stdout:\\n{0}\".format(stdout))\n if stderr:\n message.append(\"- stderr:\\n{0}\".format(stderr))\n logging.debug(\"\\n\".join(message))\n\n self.stdout = stdout\n self.stderr = stderr\n\n return self", "def running_output(process, outputs):\n state = type(\"State\",\n (object, ),\n {\n \"printed_message\": False,\n \"read_first_byte\": False\n })\n\n def output_printer(file_handle):\n \"\"\"Thread that prints the output of this process.\"\"\"\n 
character = bytearray()\n while True:\n character += file_handle.read(1)\n try:\n if character:\n if not state.read_first_byte:\n state.read_first_byte = True\n\n if character != \"\\n\":\n IndentedLogger.message(\"\\n\")\n\n # If this fails, then we will just read further characters\n # until the decode succeeds.\n IndentedLogger.message(character.decode(\"utf-8\"))\n state.printed_message = True\n character = bytearray()\n else:\n return\n except UnicodeDecodeError:\n continue\n\n stdout = threading.Thread(target=output_printer, args=(outputs[0], ))\n\n stdout.start()\n stderr_lines = list(outputs[1])\n\n try:\n status = process.wait()\n finally:\n stdout.join()\n\n # Print a new line before printing any stderr messages\n if len(stderr_lines):\n IndentedLogger.message(\"\\n\")\n\n for line in stderr_lines:\n IndentedLogger.message(line.decode(\"utf-8\"))\n state.printed_message = True\n\n if state.printed_message:\n print_message(\"\\n\")\n\n return status", "def _run_extractor(self):\n \n # create the command to run.\n cli_args = [self.extractor_path, self.account_name, self.pst_file, self.output_path]\n if self.use_mono:\n cli_args.insert(0, \"mono\")\n self.logger.debug(\"Running command: {}\".format(\" \".join(cli_args)))\n \n # if @self.use_mono is False (i.e. Windows), hide the console window per:\n # https://stackoverflow.com/a/1016651\n # See also: https://docs.python.org/3/library/subprocess.html#windows-popen-helpers\n startup_info = None\n if not self.use_mono:\n startup_info = subprocess.STARTUPINFO()\n startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\n # run @self.extractor_app; based on: https://stackoverflow.com/a/803396\n process = subprocess.Popen(cli_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n startupinfo=startup_info, universal_newlines=True)\n \n # prepare to capture each character outputted from @self.extractor_app.\n line_parts = []\n\n while process.poll() is None:\n \n # save output to @line_parts as long as the output is not a line break.\n # if the output is a line break, @line_parts is converted to a string and logged\n # and @line_parts is cleared.\n for std_out in process.stdout.read(1):\n if std_out != \"\\n\":\n std_out = std_out.encode(self.charset).decode(self.charset, \n errors=\"replace\")\n line_parts.append(std_out)\n process.stdout.flush()\n else:\n line = \"\".join(line_parts)\n line_parts[:] = []\n self._log_subprocess_line(line)\n\n # raise an exception if @process returns a positive integer (i.e. 
fails).\n if process.returncode > 0:\n self.logger.debug(\"Child process stderr: {}\".format(process.stderr.readlines()))\n msg = \"Command failed with return code: {}\".format(process.returncode)\n raise ChildProcessError(msg)\n\n return", "def process_tool_output(self, proc, log_fn, name):\n output, error = proc.communicate()\n if proc.returncode != 0:\n fd = open(log_fn, \"a+\")\n fd.write(\"Error: %s returned the following output:\"\n \"\\n%s\" % (name, error))\n fd.close()\n raise Exception(\"%s returned the following output:\"\n \"\\n%s\" % (name, error))\n else:\n fd = open(log_fn, \"a+\")\n fd.write(\"%s output: %s %s\" % (name, output, error))\n # printing stdout and stderr though bowtie2 currently will always\n # output to stderr instead of stdout even if no error occurred\n fd.close()", "def process():", "def execute(command, stdout, stderr=sys.stdout):\n # Does tail work to watch stdout to logging service?\n proc = subprocess.Popen(\n command, shell=True, stdout=stdout, stderr=stderr)\n proc.wait()", "def commandLog(self,):\n \n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n\n #\n # Add run to runs table and open connection to logfile\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, False, MASTER)\n self.openLogfileConnection()\n SEAseqPipeLine.logfile.write(self.createLogHeader())\n \n # default all types of commands run\n runTypes = self.availableCommands.keys()\n \n SEAseqPipeLine.logfile.write('Writing commandLog to standard out.\\n')\n print 'Getting runs performed with the following commands '+', '.join(runTypes[:-1])+' or '+runTypes[-1]+'.'\n print '# StartTime: \\tFinished:\\tCommand:'\n for startTime, command, commandLine, finishedSuccessfully, masterPid in self.database.getRuns(runTypes):\n print str(startTime)+' \\t'+str(bool(finishedSuccessfully))+' \\t'+str(commandLine)\n \n #\n # update runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n SEAseqPipeLine.logfile.write('Finished exiting.\\n')", "def run_cmd_and_log(cmd, log_msg, log_file_path, err_on_fail=True):\n logger.info(log_msg)\n logger.info(f\"log file can be found at {log_file_path}\")\n logger.info(cmd.replace(\"\\\\\", \"\\\\\\\\\"))\n with open(log_file_path, \"w\") as log_file:\n process = subprocess.Popen(cmd,\n stdout=log_file,\n stderr=log_file,\n shell=True,\n universal_newlines=True)\n\n return_code = process.wait()\n if return_code != 0 and err_on_fail:\n raise ChildProcessError(\n f\"The following command failed {cmd} \\n The log file can be found at {log_file_path}\")", "def run(self):\n\n # Start the video stream process\n self._process.start()", "def run_and_log_output(cmd_string):\n logging.info('Running %s', cmd_string)\n c = iterpipes.cmd(cmd_string)\n out = iterpipes.run(c)\n for line in out:\n logging.info(line)", "def test_run_process(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo result > result\n \"\"\")\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert path.isfile(\"result\")", "def run(self, filename):\n\n # make a tempdir so I can use a file-like object\n # instead of an OS-level handle (like with mkstemp)\t\t\n tdir = make_tempdir()\n tname = os.path.join(tdir,'process.out')\n\n #print \"TEMPDIR = \",tdir\n #print \"TEMPFILE = \",tname\n \n f_out = open(tname,'wb')\n f_in = open(filename,'rb')\n\n # process in->out\n self.process(f_out, f_in)\n\n del f_out\n del 
f_in\n\n # copy tempfile -> filename\n\n #print \"COPY %s -> %s\" % (tname,filename)\n \n # I think this is secure ... since caller owns filename\n # there isn't a race, right? (unlike writing into a tempdir\n # which could have malicious symlinks in it)\n copy2( tname, filename )\n\n #print \"RMDIR %s\" % tname\n \n # clean up tempdir\n unlink(tname)\n os.rmdir(tdir)", "def start():\n logging.info(\"Execution Started\")", "def runCommand(command, outputPrefix=\"ProcessRunner> \"):\n proc = ProcessRunner(command)\n proc.mapLines(WriteOut(sys.stdout, outputPrefix=outputPrefix), procPipeName=\"stdout\")\n proc.mapLines(WriteOut(sys.stderr, outputPrefix=outputPrefix), procPipeName=\"stderr\")\n proc.wait()\n returnCode = proc.poll()\n\n # proc.terminate()\n # proc.shutdown()\n\n return returnCode", "def _printAndRun(self, logger, prefix, command, check=False):\n logger.info(prefix + \"Run: {}\".format(command), False)\n subprocess.run(command, check=check)", "def log(self, *args):\n self.log_stdout(*args)\n print(*args, file=self.general_log_file.file)\n self.general_log_file.flush()", "def process():\n pass", "def writeLog(pid):\n\tglobal processes,logfile,strikes,sleep\n\tproc = processes[pid]\n\tlogfile.write('[%s] %d %s %f%%cpu %f%%mem (over %d s): %s\\n'%(time.strftime('%b %d %H:%M:%S'),pid,proc.user,proc.cpu,proc.mem,proc.count*sleep,proc.command))", "def run(self):\n if self.log_file: # if path of SSH-log file is valid\n # Rotate & parse the log file\n self.parse_log_file()\n # Analyze the log for deviating algorithm\n self.check_manipulation()", "def LogProcess(self):\n time = datetime.today().strftime('%a %Y%b%d %X')\n# Get user name.\n f = os.popen(\"whoami\",\"r\")\n user = f.read().strip()\n f.close()\n\n entry = '%s\\t%s\\t%s\\t%s\\n' % (time, self.topdir, user, self.version)\n\n if ismounted(c.exams_file):\n# Append info to the exams file.\n try:\n f = open(c.exams_file,'a+')\n f.seek(0, 2)\n f.write(entry)\n f.close()\n except:\n# Not a huge problem if this doesn't work.\n pass", "def logcmdoutput(self, p, logit):\n output, error = p.communicate()\n if logit:\n self.logtxt(output.decode('utf-8','replace'), 'output')\n if error.decode():\n self.logtxt(\"ERROR: \" + error.decode('utf-8','replace'), 'error')", "def run (self):\t\r\n\t\tif self.mode != MODE_RUNNING:\r\n\t\t\tself.mode = MODE_RUNNING\r\n\t\twhile self.mode == MODE_RUNNING : \r\n\t\t\tif self.continue_work () == 0 : \r\n\t\t\t\tself.mode = MODE_DISABLE \r\n\t\tif self.fio != 0:\r\n\t\t\tfor i in range(len(logfile)):\r\n\t\t\t\tself.fio.write(logfile[i])", "def run(project, logger, cmd_name, command):\n dir_logs = project.expand('$dir_logs')\n pybuilder.utils.mkdir(dir_logs)\n out_file = os.path.join(dir_logs, '{0}.log'.format(cmd_name))\n err_file = os.path.join(dir_logs, '{0}.err'.format(cmd_name))\n with open(out_file, 'w') as out:\n with open(err_file, 'w') as err:\n retcode = subprocess.call(command, shell=True, stdout=out, stderr=err)\n if retcode:\n logger.error(\"{2} failed. 
See {0} and {1} for details.\"\n .format(out_file, err_file, cmd_name))\n raise Exception(\"{0} Failed\".format(cmd_name))", "def run(self):\n self.log.info(\"Starting thread: \" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def use_log_file(log_file, process_name=None):\n\n\tif process_name is None:\n\t\tmy_process_name = stylize(ST_NAME, 'foundations.use_log_file')\n\telse:\n\t\tmy_process_name = stylize(ST_NAME, process_name)\n\n\tlogging.progress(_(u'{0}({1}): using {2} as log channel.').format(\n\t\t\t\t\tmy_process_name, stylize(ST_UGID, os.getpid()),\n\t\t\t\t\tstylize(ST_PATH, log_file if log_file else 'stdout')))\n\n\tif log_file:\n\t\tout_log = file(log_file, 'ab+')\n\n\telse:\n\t\tout_log = sys.stdout\n\n\tdev_null = file(os.devnull, 'rw')\n\n\t# not needed.\n\t#sys.stdout.flush()\n\t#sys.stderr.flush()\n\n\t# also not needed.\n\t#os.close(sys.stdin.fileno())\n\t#os.close(sys.stdout.fileno())\n\t#os.close(sys.stderr.fileno())\n\n\tos.dup2(dev_null.fileno(), sys.stdin.fileno())\n\tos.dup2(out_log.fileno(), sys.stdout.fileno())\n\tos.dup2(out_log.fileno(), sys.stderr.fileno())", "def start(self):\n if self.SILENT_TIMEOUT < self.MINIMUM_SILENT_TIMEOUT:\n raise AssertionError('Maximum recursion depth exceeded in %r' % self)\n\n sys.stdout.flush()\n sys.stderr.flush()\n self._output = tempfile.NamedTemporaryFile(delete=False, bufsize=0,\n prefix='chromite-parallel-')\n self._parent_pid = os.getpid()\n return multiprocessing.Process.start(self)", "def build(self):\n my_cmd = self.process_cmd % (self.frun_path)\n Msg.user(\"Process Command: %s\" % (str(my_cmd)), \"LAUNCHER\")\n my_log = PathUtils.include_trailing_path_delimiter(self.frun_dir) + self.process_log\n return my_cmd, my_log", "def start_process(cmd, supress_output=False):\n logging.debug(cmd)\n logging.error(\"[tony]cmd:%r\" % (cmd))\n proc = subprocess.Popen(cmd, stdout=None, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n rtn_code = proc.returncode\n\n if supress_output is False:\n if out:\n logging.info(out)\n if err:\n logging.error(err)\n\n if rtn_code == 0 or rtn_code is None:\n logging.info('Success: Process return code %s', str(rtn_code))\n else:\n logging.error('Error: Process return code %s', str(rtn_code))\n sys.exit(1)", "def start(self):\n log.startLoggingWithObserver(self.emit, setStdout=0)", "def run(self, command):\n try:\n print(f\"RUNNING: {command}\")\n print(\"-\" * 80)\n print(subprocess.check_output(command, shell=True).decode('utf-8'))\n except subprocess.CalledProcessError as e:\n print(f\"ERROR calling '{command}'\")\n print(\"-\" * 20)\n print(e.output and e.output.decode('utf-8'))\n sys.exit(-1)", "def doTask(self):\n\n def signal_cb(s, f):\n os._exit(0)\n\n for s in signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT:\n signal.signal(s, signal_cb)\n\n # write pidfile\n def atexit_cb():\n print(\"Exit fork\")\n\n atexit.register(atexit_cb)\n\n # Start the write\n i = 0\n while self.pid == 0 or not self.do_fork:\n print(self.msg % os.getpid())\n time.sleep(2)\n i += 1", "def _runProcess(self, cmd, echoStdout = True, **kwargs):\n # Can't use unicode!\n cmd = str(cmd)\n defaultKwargs = {\n 'universal_newlines': True\n }\n if echoStdout:\n defaultKwargs['stdout'] = subprocess.PIPE\n # Don't buffer the output, but echo it as it comes in regardless\n # of newlines, etc\n defaultKwargs['bufsize'] = 1\n else:\n defaultKwargs['stdout'] = tempfile.TemporaryFile()\n defaultKwargs['stderr'] = subprocess.STDOUT\n defaultKwargs.update(kwargs)\n\n env = 
os.environ.copy()\n env['PATH'] = self.settings['context_build_path'] + ':' + env['PATH']\n env.update(defaultKwargs.get('env', {}))\n defaultKwargs['env'] = env\n\n p = subprocess.Popen(shlex.split(cmd), **defaultKwargs)\n if echoStdout:\n try:\n import fcntl\n fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n except ImportError:\n # Windows?\n pass\n if callable(echoStdout):\n outputCallback = echoStdout\n else:\n outputCallback = lambda l: self.writeOutput(l, end = '')\n\n stdThread = threading.Thread(target = self._dumpStdout,\n args = (p, outputCallback))\n stdThread.start()\n while p.poll() is None:\n if self._shouldStop():\n break\n time.sleep(0.1)\n if p.poll() is None:\n # Exited due to shouldStop\n self.writeOutput(\"\\n\\nAborting tests...\")\n while p.poll() is None:\n try:\n p.terminate()\n except OSError:\n # Died already\n pass\n time.sleep(0.1)\n\n if echoStdout:\n # Finish getting output\n stdThread.join()\n\n if not echoStdout:\n tf = defaultKwargs['stdout']\n tf.seek(0)\n return tf", "def run(self):\n self.empty_pid_file()\n self.queue = Queue()\n self.monitor_process = Process(\n target=ResourceMonitor.monitor_function,\n args=(self.launcher, self.pid_file, self.frequency, self.queue)\n )\n self.monitor_process.start()", "def log(self, i):\n if i % 100 == 0:\n sys.stderr.write(\"process \" + str(os.getpid()) + \": at timestep \" + str(i) + \"\\n\")", "def main(args):\n # Results: print to console and also write to output file\n pass", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def run(self):\n for line in iter(self.pipeReader.readline, ''):\n logging.log(self.level, line.strip('\\n'))\n\n self.pipeReader.close()", "def _Run(self):\n # Register a handler for a signal that is rarely used.\n def trigger_bt(_sig_num, frame):\n logging.error('pre-kill notification (SIGXCPU); traceback:\\n%s',\n ''.join(traceback.format_stack(frame)))\n signal.signal(signal.SIGXCPU, trigger_bt)\n\n sys.stdout.flush()\n sys.stderr.flush()\n errors = []\n # Send all output to a named temporary file.\n with open(self._output.name, 'w', 0) as output:\n # Back up sys.std{err,out}. These aren't used, but we keep a copy so\n # that they aren't garbage collected. 
We intentionally don't restore\n # the old stdout and stderr at the end, because we want shutdown errors\n # to also be sent to the same log file.\n _orig_stdout, _orig_stderr = sys.stdout, sys.stderr\n\n # Replace std{out,err} with unbuffered file objects.\n os.dup2(output.fileno(), sys.__stdout__.fileno())\n os.dup2(output.fileno(), sys.__stderr__.fileno())\n sys.stdout = os.fdopen(sys.__stdout__.fileno(), 'w', 0)\n sys.stderr = os.fdopen(sys.__stderr__.fileno(), 'w', 0)\n\n try:\n self._started.set()\n results_lib.Results.Clear()\n\n # Reduce the silent timeout by the prescribed amount.\n cls = self.__class__\n cls.SILENT_TIMEOUT -= cls.SILENT_TIMEOUT_STEP\n\n # Actually launch the task.\n self._task(*self._task_args, **self._task_kwargs)\n except failures_lib.StepFailure as ex:\n errors.extend(failures_lib.CreateExceptInfo(\n ex, traceback.format_exc()))\n except BaseException as ex:\n errors.extend(failures_lib.CreateExceptInfo(\n ex, traceback.format_exc()))\n if self._killing.is_set():\n traceback.print_exc()\n finally:\n sys.stdout.flush()\n sys.stderr.flush()\n\n return errors", "def run(self, data_id, script, verbosity=1):\n if verbosity >= 1:\n print('RUN: {} {}'.format(data_id, script))\n\n self.data_id = data_id\n self.process_failed = False\n\n # Fetch data instance to get any executor requirements.\n self.process = Data.objects.get(pk=data_id).process\n requirements = self.process.requirements\n self.requirements = requirements.get('executor', {}).get(self.name, {})\n self.resources = requirements.get('resources', {})\n\n data_dir = settings.FLOW_EXECUTOR['DATA_DIR']\n dir_mode = getattr(settings, 'FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755)\n\n output_path = os.path.join(data_dir, str(data_id))\n\n os.mkdir(output_path)\n # os.mkdir is not guaranteed to set the given mode\n os.chmod(output_path, dir_mode)\n os.chdir(output_path)\n\n log_file = open('stdout.txt', 'w+')\n json_file = open('jsonout.txt', 'w+')\n\n proc_pid = self.start()\n\n self.update_data_status(\n status=Data.STATUS_PROCESSING,\n started=now(),\n process_pid=proc_pid\n )\n\n # Run processor and handle intermediate results\n self.run_script(script)\n spawn_processors = []\n output = {}\n process_error, process_warning, process_info = [], [], []\n process_progress, process_rc = 0, 0\n\n # read processor output\n try:\n stdout = self.get_stdout()\n while True:\n line = stdout.readline()\n if not line:\n break\n\n try:\n if line.strip().startswith('run'):\n # Save processor and spawn if no errors\n log_file.write(line)\n log_file.flush()\n\n for obj in iterjson(line[3:].strip()):\n spawn_processors.append(obj)\n elif line.strip().startswith('export'):\n file_name = line[6:].strip()\n\n export_folder = settings.FLOW_EXECUTOR['UPLOAD_DIR']\n unique_name = 'export_{}'.format(uuid.uuid4().hex)\n export_path = os.path.join(export_folder, unique_name)\n\n self.exported_files_mapper[self.data_id][file_name] = unique_name\n\n shutil.move(file_name, export_path)\n else:\n # If JSON, save to MongoDB\n updates = {}\n for obj in iterjson(line):\n for key, val in six.iteritems(obj):\n if key.startswith('proc.'):\n if key == 'proc.error':\n process_error.append(val)\n if not process_rc:\n process_rc = 1\n updates['process_rc'] = process_rc\n updates['process_error'] = process_error\n updates['status'] = Data.STATUS_ERROR\n elif key == 'proc.warning':\n process_warning.append(val)\n updates['process_warning'] = process_warning\n elif key == 'proc.info':\n process_info.append(val)\n updates['process_info'] = 
process_info\n elif key == 'proc.rc':\n process_rc = int(val)\n updates['process_rc'] = process_rc\n if process_rc != 0:\n updates['status'] = Data.STATUS_ERROR\n elif key == 'proc.progress':\n process_progress = int(float(val) * 100)\n updates['process_progress'] = process_progress\n else:\n dict_dot(output, key, val)\n updates['output'] = output\n\n if updates:\n updates['modified'] = now()\n self.update_data_status(**updates)\n\n if process_rc > 0:\n log_file.close()\n json_file.close()\n os.chdir(CWD)\n return\n\n # Debug output\n # Not referenced in Data object\n json_file.write(line)\n json_file.flush()\n\n except ValueError as ex:\n # Ignore if not JSON\n log_file.write(line)\n log_file.flush()\n\n except MemoryError as ex:\n logger.error(__(\"Out of memory: {}\", ex))\n\n except IOError as ex:\n # TODO: if ex.errno == 28: no more free space\n raise ex\n finally:\n # Store results\n log_file.close()\n json_file.close()\n os.chdir(CWD)\n\n return_code = self.end()\n\n if process_rc < return_code:\n process_rc = return_code\n\n # This transaction is needed to make sure that processing of\n # current data object is finished before manager for spawned\n # processes is triggered.\n with transaction.atomic():\n if spawn_processors and process_rc == 0:\n parent_data = Data.objects.get(pk=self.data_id)\n\n # Spawn processors\n for d in spawn_processors:\n d['contributor'] = parent_data.contributor\n d['process'] = Process.objects.filter(slug=d['process']).latest()\n\n for field_schema, fields in iterate_fields(d.get('input', {}), d['process'].input_schema):\n type_ = field_schema['type']\n name = field_schema['name']\n value = fields[name]\n\n if type_ == 'basic:file:':\n fields[name] = self.hydrate_spawned_files(value, data_id)\n elif type_ == 'list:basic:file:':\n fields[name] = [self.hydrate_spawned_files(fn, data_id) for fn in value]\n\n with transaction.atomic():\n d = Data.objects.create(**d)\n DataDependency.objects.create(\n parent=parent_data,\n child=d,\n kind=DataDependency.KIND_SUBPROCESS,\n )\n\n # Copy permissions.\n copy_permissions(parent_data, d)\n\n # Entity is added to the collection only when it is\n # created - when it only contains 1 Data object.\n entities = Entity.objects.filter(data=d).annotate(num_data=Count('data')).filter(num_data=1)\n\n # Copy collections.\n for collection in parent_data.collection_set.all():\n collection.data.add(d)\n\n # Add entities to which data belongs to the collection.\n for entity in entities:\n entity.collections.add(collection)\n\n if process_rc == 0 and not self.process_failed:\n self.update_data_status(\n status=Data.STATUS_DONE,\n process_progress=100,\n finished=now()\n )\n else:\n self.update_data_status(\n status=Data.STATUS_ERROR,\n process_progress=100,\n process_rc=process_rc,\n finished=now()\n )\n\n try:\n # Cleanup after processor\n data_purge(data_ids=[data_id], delete=True, verbosity=verbosity)\n except: # pylint: disable=bare-except\n logger.error(__(\"Purge error:\\n\\n{}\", traceback.format_exc()))\n\n # if not update_data(data): # Data was deleted\n # # Restore original directory\n # os.chdir(settings.PROJECT_ROOT)\n # return\n\n # Restore original directory\n # os.chdir(settings.PROJECT_ROOT)\n\n # return data_id", "def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)", "def process_output(self, 
data):\n\n if self.interactive_result_stdout_writing:\n self.brief_logger.debug(data)\n if self.verbose_logger:\n self.verbose_logger.info(data)\n\n # f.write(data)\n # show results instantly in log file\n # f.flush()\n\n return data\n\n # TODO: #68: compile re for better performance\n # TODO: RENAME", "def MonitoringProcessOnVM(self, pID, remoteLogFile=None):\n statusCode = 0 # process exit code\n timeTick = 6 # delay time to check process\n\n try:\n status = self.VMStatus()\n\n if status != 'POWERED ON':\n raise Exception('Virtual machine must be started before process monitoring!')\n\n LOGGER.info('Starting process [PID = {}] monitoring...'.format(pID))\n print(\"##teamcity[progressStart 'Executing process with PID = {} on virtual machine']\".format(pID))\n\n processActive = True\n timeCount = 0\n while processActive:\n time.sleep(timeTick)\n timeCount += timeTick\n\n processList = self.vmInstance.list_processes()\n\n for item in processList:\n if str(item['pid']) == str(pID):\n LOGGER.debug(r' Process status-line given by VM-tools: {}'.format(item))\n if item['exit_code'] is not None:\n statusCode = item['exit_code']\n processActive = False\n\n break\n\n print(\"##teamcity[progressFinish 'Executing process with PID = {} on virtual machine']\".format(pID))\n LOGGER.info('Process finished successful with exit code = {}. Duration: ~{} sec.'.format(statusCode, timeCount))\n\n LOGGER.debug('Downloading process log-file from VM...')\n logFile = os.path.abspath(os.path.join(os.curdir, os.path.basename(remoteLogFile))) # local log file\n\n if remoteLogFile:\n if os.path.exists(logFile):\n os.remove(logFile)\n\n self.CopyFileFromVM(srcFile=remoteLogFile, dstFile=logFile, overwrite=True)\n\n if os.path.exists(logFile):\n with open(logFile, 'r') as fH:\n output = fH.readlines()\n\n if output and len(output) > 0:\n LOGGER.debug('Process output:')\n for line in output:\n LOGGER.debug(' {}'.format(line.strip()))\n\n except Exception as e:\n LOGGER.debug(e)\n LOGGER.error('Unknown exception occurred while process executing!')\n LOGGER.error(traceback.format_exc())\n statusCode = -1\n\n return statusCode", "def run(self):\n\n try:\n self.parent.setEnabled(False)\n\n # Run DataLab processing; compute and write requested logger statistics and spectrograms\n if self.processing_mode == \"screening\":\n self.processing_hub.run_screening()\n elif self.processing_mode == \"integration\":\n self.processing_hub.run_ts_integration()\n\n # Emit processed results to outside worker to present in gui\n self.signal_screening_output_to_gui.emit(self.processing_hub)\n except ValueError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except TypeError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except ZeroDivisionError as e:\n self.signal_error.emit(str(e))\n logging.exception(e)\n except Exception as e:\n msg = \"Unexpected error during processing\"\n self.signal_error.emit(f\"{msg}:\\n{e}\\n{sys.exc_info()[0]}\")\n logging.exception(e)\n finally:\n self.parent.setEnabled(True)\n self.parent.statusbar.showMessage(\"\")", "def run(self):\n output_path = \"%scompleted_subsetting_%s.log\" % (\n self.logging_path, self.pipeline_task\n )\n open(output_path, 'a').close()", "def exec_shell(self):\n command = self.state_frame[0]\n required = self.state == STATE_EXEC_SHELL\n\n try:\n results = subprocess.check_output(\n command,\n cwd=self.working_dir,\n shell=True\n )\n self.log_file.write(results)\n self.state = STATE_READ_LINE\n except subprocess.CalledProcessError as e:\n 
self.log_file.write('[ERROR] ' + str(e))\n self.state_frame = [self.worker_name + '.txt']\n if required:\n self.state = STATE_SEND_LOG_TERMINAL\n else:\n self.state = STATE_READ_LINE", "def __init__(self, name, command, output, events,\n restart_process, repeats_output_when_opened):\n super(ProcessLogger, self).__init__()\n self.name = name\n self.command = command\n self.restart_process = restart_process\n self.repeats_output_when_opened = repeats_output_when_opened\n self.process = None\n self.lock = threading.Lock()\n self.looking = False\n\n # Compile the list of regexes that we're supposed to be looking for.\n self.events = []\n for event in events:\n self.events.append(ProcessLogger.EventScanner(event.name, self.name,\n event.regex))\n\n if output:\n stress_test_common.MakeDirsIfNeeded(os.path.dirname(output))\n self.output_fp = open(output, \"w\", encoding=\"utf-8\")\n logging.info(\"Logging device info to %s\", output)\n else:\n self.output_fp = None", "def logToFile(output, file): \r\n print( output, file=file )", "def main():\n flags = parser_create()\n config_data = config_loader_yaml(flags.config_file)\n loggers_config = get_loggers_config(config_data)\n logging_queue = multiprocessing.Queue()\n logging_worker = LoggingWorker(loggers_config, logging_queue)\n logging_worker.start()\n\n class_name = \"\"\n function_name = inspect.stack()[0][3]\n\n for i in range(5):\n log_message(logging_queue, 'DEBUG', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'INFO', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'WARNING', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'ERROR', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'CRITICAL', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'Unknown', __name__, class_name, function_name, 'Message ' + str(i))\n\n logging_queue.put(None)\n logging_worker.join()", "def run_pipeline(conf_info):\n # Fail if the run_path doesn't actually exist\n if not os.path.exists(conf_info.run_path):\n LOGGER.error('Run path does not exist: %s' \\\n % (conf_info.run_path))\n return False\n\n # Change cwd to run_path\n stdout_filepath = os.path.join(conf_info.analysis_dir, 'pipeline_run_stdout.txt')\n stderr_filepath = os.path.join(conf_info.analysis_dir, 'pipeline_run_stderr.txt')\n\n # Create status object\n conf_info.createStatusObject()\n\n # Monitor file creation\n wm = WatchManager()\n mask = EventsCodes.IN_DELETE | EventsCodes.IN_CREATE\n event = RunEvent(conf_info)\n notifier = ThreadedNotifier(wm, event)\n notifier.start()\n wdd = wm.add_watch(conf_info.run_path, mask, rec=True)\n\n # Log pipeline starting\n LOGGER.info('STARTING PIPELINE @ %s' % (time.ctime()))\n\n # Start the pipeline (and hide!)\n #pipe = subprocess.Popen(['make',\n # '-j8',\n # 'recursive'],\n # stdout=subprocess.PIPE,\n # stderr=subprocess.PIPE)\n\n fout = open(stdout_filepath, 'w')\n ferr = open(stderr_filepath, 'w')\n\n pipe = subprocess.Popen(['make',\n '--directory=%s' % (conf_info.run_path),\n '-j8',\n 'recursive'],\n stdout=fout,\n stderr=ferr)\n #shell=True)\n # Wait for run to finish\n retcode = pipe.wait()\n\n\n # Clean up\n notifier.stop()\n fout.close()\n ferr.close()\n\n # Process stderr\n ferr = open(stderr_filepath, 'r')\n\n run_failed_stderr = False\n for line in ferr:\n err_status = pipeline_stderr_handler(line, conf_info)\n if err_status == RUN_FAILED:\n run_failed_stderr = 
True\n\n ferr.close()\n\n # Finished file check!\n print('RUN SUCCESS CHECK:')\n for key, value in event.run_status_dict.items():\n print(' %s: %s' % (key, value))\n\n dstatus = event.run_status_dict\n\n # Success or failure check\n status = (retcode == 0) and \\\n run_failed_stderr is False and \\\n dstatus['firecrest'] is True and \\\n dstatus['bustard'] is True and \\\n dstatus['gerald'] is True\n\n return status", "def _run_command(self, job, config):\n # Bail early if there is nothing do do\n if job.command is None:\n return None, ()\n # Get an extcmd delegate for observing all the IO the way we need\n delegate, io_log_gen = self._prepare_io_handling(job, config)\n # Create a subprocess.Popen() like object that uses the delegate\n # system to observe all IO as it occurs in real time.\n extcmd_popen = extcmd.ExternalCommandWithDelegate(delegate)\n # Stream all IOLogRecord entries to disk\n record_path = os.path.join(\n self._jobs_io_log_dir, \"{}.record.gz\".format(\n slugify(job.id)))\n with gzip.open(record_path, mode='wb') as gzip_stream, \\\n io.TextIOWrapper(\n gzip_stream, encoding='UTF-8') as record_stream:\n writer = IOLogRecordWriter(record_stream)\n io_log_gen.on_new_record.connect(writer.write_record)\n # Start the process and wait for it to finish getting the\n # result code. This will actually call a number of callbacks\n # while the process is running. It will also spawn a few\n # threads although all callbacks will be fired from a single\n # thread (which is _not_ the main thread)\n logger.debug(\n _(\"job[%s] starting command: %s\"), job.id, job.command)\n # Run the job command using extcmd\n return_code = self._run_extcmd(job, config, extcmd_popen)\n logger.debug(\n _(\"job[%s] command return code: %r\"), job.id, return_code)\n return return_code, record_path", "def start_test_exec(cls):\n time_str = cls.get_current_time()\n os.system(\"robot -l ./logs/log_{0}.html -r ./logs/report_{0}.html -o ./logs/output_{0}.xml \\\n ./test_suite/{1}\".format(time_str, test_suite))", "def _log_operation(self, operation, logdir, stdout, stderr):\n self.logger.debug(\"log operation results\")\n # log filenames\n log_file = os.path.join(logdir, operation + \".log\")\n err_file = os.path.join(logdir, operation + \"-err.log\")\n # always log stdout\n self.logger.info(\"complete informations in '%s'\" % log_file)\n log = open(log_file, \"w\")\n log.write(stdout)\n log.close()\n # only log if there is errors\n if stderr is not \"\":\n self.logger.warning(\"errors are logged in '%s'\" % err_file)\n err = open(err_file, \"w\")\n err.write(stderr)\n err.close()", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def logOutput(self, line):\r\n self.writeToLog('output', line)", "def run(self):\n print \"%s: Start %s (%d s, pid: %d)\" % (self.getName(),time.ctime(),self.time,os.getpid())\n time.sleep(self.time)\n print \"%s: End %s\" % (self.getName(),time.ctime())", "def Run(self) -> None:\n logging.info(\"Running %s in a subprocess...\", self)\n self.stdout = tempfile.TemporaryFile()\n self.stderr = tempfile.TemporaryFile()\n self.begin_time = time.time()\n env = os.environ.copy()\n # Give each test program a separate test_tmpdir so they don't overwrite\n # each other when running in 
parallel.\n env[\"TEST_TMPDIR\"] = tempfile.mkdtemp()\n # Bazel's test sharding protocol:\n # https://docs.bazel.build/versions/master/test-encyclopedia.html\n if self.total_shards > 1:\n env[\"TEST_TOTAL_SHARDS\"] = str(self.total_shards)\n env[\"TEST_SHARD_INDEX\"] = str(self.shard_id)\n\n self.subprocess = subprocess.Popen(\n [_GetPython(), self.path], stdout=self.stdout, stderr=self.stderr,\n env=env)", "def test_stdout_log(self, logger: Logger) -> None:\n task = OctaveTask()\n task.session_id = \"123\"\n handler = OutputHandler(task)\n logger.addHandler(handler)\n\n # Write something to the log\n msg = \"I am a message\"\n logger.info(msg)\n\n assert len(handler.contents) == 1\n assert handler.messages() == msg", "def strategy(process, outputs):\n status = _maybe_use_running_output(process, outputs)\n if status is not None:\n return status\n\n def print_dots(status_queue):\n \"\"\"Print a dot every dot_timeout seconds.\"\"\"\n while True:\n # Exit when something gets written to the pipe\n try:\n status_queue.get(True, dot_timeout)\n return\n except Empty:\n IndentedLogger.dot()\n\n status_queue = Queue()\n dots_thread = threading.Thread(target=print_dots,\n args=(status_queue, ))\n dots_thread.start()\n\n try:\n status = output_on_fail(process, outputs)\n finally:\n status_queue.put(\"done\")\n dots_thread.join()\n\n return status", "def _log_output(config: Configuration, stderr: IO, signal: RunningSignal):\n if config.verbose > 0:\n logger = logging.getLogger(\"ffmpeg\")\n while signal.running():\n try:\n line = _readline(stderr)\n if line != '':\n logger.info(line)\n except:\n pass\n logger.debug(\"Logging thread ended\")", "def do_log(self, line):\n subprocess.call([\n 'git', '-C', self.repo.path,\n 'log', '--graph', '--oneline',\n \"{}...pr/{}\".format(self.review.pr.base.ref, self.review.pr.number),\n ])", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def redirect_log(wkdir):\n import os, sys\n import multiprocess as mp\n # define stdout and stderr names\n \n stdout = os.path.join(wkdir, 'stdout')\n stderr = os.path.join(wkdir, 'stderr')\n _info(\" stdout->[%s]\" % stdout)\n _info(\" stderr->[%s]\" % stderr)\n\n # synch-ing log\n map(lambda x: x.flush(), (sys.stdout, sys.stderr))\n\n flags = os.O_CREAT | os.O_WRONLY\n fdout = os.open (stdout, flags)\n assert fdout>=0, \\\n \"problem child [%r] opening stdout\" % mp.current_process()\n fileno = sys.stdout.fileno()\n os.close (fileno)\n os.dup2 (fdout, fileno)\n\n fderr = os.open (stderr, flags)\n assert fderr>=0, \\\n \"problem child [%r] opening stderr\" % mp.current_process()\n fileno = sys.stderr.fileno()\n os.close (fileno)\n os.dup2 (fderr, fileno)", "def log_completed_run(self, log_file_path):\n with open(log_file_path, \"r\") as f:\n stats = f.read().splitlines()\n\n self._parse_log_entry(stats)\n self.experiment.log_other(\"log_file_path\", log_file_path)", "def start(self, suite, args=[]):\n self.suite = suite\n 
self.output_file = \"%s_Output.xml\" % (self.name)\n temp, suiteName = os.path.split(payload) \n jyLog = open(os.path.join(logFolder, (\"%s_Log.txt\" % self.name)), \"w\") \n jybotCommand = \"pybot -o %s %s\" % (os.path.join(logFolder, self.output_file), self.suite)\n \n print \"Executing : %s ...\" % jybotCommand\n self.running = True\n self.process = subprocess.Popen([\"pybot\", \"-o\", \"%s\" % os.path.join(logFolder, self.output_file), \"%s\" % self.suite], cwd=clientCwd, stdout=jyLog, stderr=jyLog)", "def send(self):\n for output in self.outputs:\n output.send(self.logger)", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream 
data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()", "def test_run_exec(self):\n from multiprocessing import Process, Queue\n output = Queue()\n repodir = \"~/codes/ci/tests/repo\"\n processes = []\n for i in range(3):\n processes.append(Process(target=run_exec, args=(repodir, \"ls -la\", output, i)))\n processes[-1].start()\n \n #Wait for the unit tests to all finish.\n for p in processes:\n p.join()\n results = [output.get() for p in processes]\n ordered = {o[\"index\"]: o for o in results}\n\n #We consider the test successful if the output files were created and the end time\n #is not None. That means that the process ran correctly and python didn't lose\n #control of the subprocess.\n from os import path\n fullrepo = path.expanduser(repodir)\n for i in range(3):\n self.assertTrue(path.isfile(path.join(fullrepo, \"{}.cidat\".format(i))))\n self.assertIsNotNone(ordered[i][\"end\"])\n self.assertEqual(ordered[i][\"code\"], 0)", "def run(self):\n while self.container.process(): pass", "def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)", "def execute_process(uuid, output, hadoop):\n # Ruta del proceso\n backend_path = \"/home/bigdata07/backend\"\n # Path para el proceso de log\n path = \"%s/logs/%s.txt\" % (backend_path, uuid)\n # Comando para crear la carpeta para guardar los resultados del proceso de Hadoop\n backend_output_dir = \"%s/output/%s\" % (backend_path, uuid)\n mkdir_output = \"mkdir -p %s\" % (backend_output_dir)\n # Comando para hacer get de HDFS al home\n get_output = \"hdfs dfs -get %s/* %s/\" % (output, backend_output_dir)\n with open(path, \"w\") as file:\n # Ejecutar Hadoop\n subprocess.run(hadoop.split(\" \"), check=True, stdout=file, stderr=file)\n subprocess.run(mkdir_output.split(\" \"), check=True, stdout=file, stderr=file)\n subprocess.run(get_output.split(\" \"), check=True, stdout=file, stderr=file)\n # Resolve() de una promesa en JS\n return backend_output_dir", "def run(self):\n print('A simple bot started the process.')\n try:\n self.calculate_before_process()\n\n if self.process == \"Like\":\n self.process_like()\n elif self.process == \"Like-and-follow\":\n self.process_like_and_follow()\n except Exception as e:\n print(e)\n finally:\n self.dump_all()\n print('A simple bot finished the process.')", "def _executeProc(self, context, dry_run):\n # Call standard _setup\n self._setup(context)\n a = self.args\n\n if not dry_run:\n # Now set status to started\n context.setStatus(STATUS.STARTED, 'Job is now running', 0)\n\n # Add output file \n stationsFile = 'weather_stations.txt'\n stationsFilePath = os.path.join(context.processDir, \"outputs\", stationsFile)\n\n if not dry_run:\n # Call code to get Weather Stations\n stationList = gws_utils.getStationList(a[\"Counties\"], a[\"BBox\"], a[\"DataTypes\"], \n a[\"StartDateTime\"], a[\"EndDateTime\"], stationsFilePath)\n \n # Add the stations list to the XML output section: ProcessSpecificContent\n context.outputs['ProcessSpecificContent'] = 
{\"WeatherStations\": \" \".join(stationList)} \n\n # In this case we want to inform the output XML that you can send the outputs to a separate process\n # This string can be picked up the an intelligent client in order to construct a new WPS request\n # with this job as its input\n context.outputs['job_details']['job_capabilities'] = \"send_to_extract_weather_data\"\n\n if not dry_run:\n # We can log information at any time to the main log file\n context.log.info('Written output file: %s' % stationsFilePath)\n else:\n context.log.debug(\"Running dry run.\")\n\n # Add the stations file to the outputs\n if not dry_run:\n self._addFileToOutputs(stationsFilePath, 'Weather Stations File')\n else:\n # Estimate size of outputs by estimating the number of stations\n if len(a[\"Counties\"]) > 0:\n nEstimatedStations = len(a[\"Counties\"]) * 15\n else:\n (w, s, e, n) = a[\"BBox\"]\n lonExtent = abs(e - w)\n latExtent = n - s\n nEstimatedStations = int(lonExtent * latExtent * 50)\n\n estimatedVolume = nEstimatedStations * 5\n self._addFileToOutputs(stationsFilePath, 'Weather Stations File', size = estimatedVolume)\n\n if not dry_run:\n # Finish up by calling function to set status to complete and zip up files etc\n # In this case we set keep = True so that weather station file is accessible to downstream process\n # without unzipping. This is fine as files are small.\n process_support.finishProcess(context, self.fileSet, self.startTime, keep = True)\n else:\n estimated_duration = 10 # seconds\n process_support.finishDryRun(context, [], self.fileSet,\n estimated_duration, acceptedMessage = 'Dry run complete')", "def start(self, _=False):\n if not self._stop:\n self._current_execution += 1\n flags = self.flags\n if '--write' not in flags:\n flags.extend(['--write', self.writepath])\n if '--output-format' not in flags:\n flags.extend(['--output-format', 'csv'])\n line = [\"airodump-ng\"] + flags + self.arguments + [self.interface]\n self._proc = Popen(line, bufsize=0,\n env={'PATH': os.environ['PATH']},\n stderr=DEVNULL, stdin=DEVNULL, stdout=DEVNULL)\n os.system('stty sane')\n\n time.sleep(5)\n watcher = threading.Thread(target=self.watch_process)\n watcher.start()", "def __process_status_callback(self, stdout, stderr):\n #log for debug installation logs\n if stdout:\n self.logger.info(u'Driver process stdout: %s' % stdout)\n if stderr:\n self.logger.error(u'Driver process stderr: %s' % stderr)", "def cli(stage, maas_url, maas_key, debug, output_log):\n if debug:\n log.setLevel(logging.DEBUG)\n detail = {'start': datetime.utcnow().isoformat(), }\n results = {}\n log_id = '{}-{}'.format(str(stage), datetime.utcnow().isoformat().replace(':', '_').replace('.', '_'))\n dt = datetime.utcnow()\n results['_summary'] = {'start': dt.isoformat()}\n log.info('Starting to run proces for stage {} with commands: '.format(str(stage), COMMAND_LIST[:stage + 1]))\n last_status_code = 0\n for cmd in COMMAND_LIST[:stage + 1]:\n if last_status_code == 0:\n results[cmd] = timed_cmd('{}'.format(cmd))\n last_status_code = results[cmd].get('return_code', 0)\n elapsed = (datetime.utcnow() - dt).total_seconds()\n results['_summary']['elapsed'] = elapsed\n if output_log:\n if not os.path.isdir('log'):\n os.makedirs('log')\n if not os.path.isdir('log/{}'.format(str(stage))):\n os.makedirs('log/{}'.format(str(stage)))\n with open('log/{}/{}.json'.format(str(stage), log_id), 'w') as f:\n f.write(json.dumps(results))\n if results:\n log.info('')\n log.info(' started - cmd - elapsed')\n for k, v in results.items():\n log.info('{} - 
{} - {} seconds'.format(v.get('start'), k, v.get('elapsed')))", "def run(self):\n logger = self.logger\n cwd = os.getcwd()\n for step_name in self.steps_to_run:\n step = self.steps[step_name]\n if step.cached:\n logger.info(' * Cached: {}'.format(step_name))\n continue\n step.config = self.config\n new_log_file = self.new_step_log_file\n if self.log_filename is not None:\n step.log_filename = self.log_filename\n do_local_logging = True\n else:\n # We only want to do local log output if the step output is\n # being redirected to a file. Otherwise, we assume we're\n # probably just running one step and the local logging is\n # redundant and unnecessary\n do_local_logging = new_log_file\n\n if do_local_logging:\n logger.info(' * Running {}'.format(step_name))\n try:\n self._run_step(step, new_log_file)\n except BaseException:\n if do_local_logging:\n logger.info(' Failed')\n raise\n\n if do_local_logging:\n logger.info(' Complete')\n\n os.chdir(cwd)", "def run_processing_engine(input_file):\n from subprocess import Popen, PIPE\n p = Popen([\"python\", \"process.py\", input_file, \"-v\"], stdout=PIPE)\n return p.wait()", "def run(filepath=\"log.txt\"):\n if (is_windows(False)):\n print(color.red(\"\\nYour system isn't *unix\\n\"))\n return\n filepath = path.abspath(filepath)\n # 获取filepath的文件夹路径\n dirpath = path.dirname(filepath)\n # 判断文件夹是否存在\n if not path.exists(dirpath):\n print(color.red(\"\\nFile path is invalid\\n\"))\n return\n # 判断文件夹是否可写\n if access(dirpath, W_OK):\n print(color.green(f\"\\nSet log in {filepath}\\n\"))\n sys.stdout = Logger(filepath, sys.__stdout__)\n sys.stderr = Logger(filepath, sys.__stderr__)\n gset(\"log_filepath\", filepath, True)\n gset(\"log_stdout\", sys.stdout, True)\n gset(\"log_stderr\", sys.stderr, True)\n else:\n print(color.red(\"\\nFile path is invalid\\n\"))", "def run(*argv):\n print(*argv, file=sys.stderr)\n subprocess.check_call(argv, stdout=sys.stderr)", "def process(self):\n pass", "def _worker(self, task, task_queue):\n with _thread_lock:\n task.process_number = self.counter\n self.counter += 1\n if task.output:\n if not task.has_error and task.processtime > 0:\n if not os.path.isfile(task.logfile):\n task.logfile = \"\"\n task_queue.put(task)\n return\n try:\n if not os.path.exists(task.folder):\n task.add_error('Could not find folder: {}'.format(task.folder))\n task.logfile = \"\"\n else:\n tmp_kwargs = dict(mode='a+', prefix=self.logfile_prefix,\n suffix='.log', dir=task.folder, delete=False)\n with NamedTemporaryFile(**tmp_kwargs) as logfile:\n logfile.write('########### MACRO #############\\n')\n logfile.write(\"\\n\".join(task.macro))\n logfile.write('\\n\\n######### OUTPUT LOG ##########')\n logfile.flush()\n task.logfile = logfile.name\n starttime = time.clock()\n exe_args = dict(macro=task.macro,\n logfile=logfile,\n anybodycon_path=self.anybodycon_path,\n timeout=self.timeout,\n keep_macrofile=self.keep_logfiles,\n env=self.env)\n retcode = _execute_anybodycon(**exe_args)\n endtime = time.clock()\n logfile.seek(0)\n if retcode == _KILLED_BY_ANYPYTOOLS:\n task.processtime = 0\n return\n task.processtime = endtime - starttime\n task.output = parse_anybodycon_output(\n logfile.read(),\n self.ignore_errors,\n self.warnings_to_include)\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n task.add_error(str(exc_type) + '\\n' + str(fname) + '\\n' + str(exc_tb.tb_lineno))\n logger.debug(str(e))\n finally:\n if not self.keep_logfiles and not task.has_error:\n 
try:\n silentremove(logfile.name)\n task.logfile = \"\"\n except OSError as e:\n pass # Ignore if AnyBody has not released the log file.\n task_queue.put(task)", "def main():\n logging.basicConfig() # create a baseconfiguration s.t. we cann now log \n cycle = 0\n while True:\n\n logging.info(f\"{time.now()} - Start cycle {cycle}\") # changed from print to info \n do_unstable_magick(cycle)\n logging.info(f\"{time.nos()} - Finished cycle {cycle}\")" ]
[ "0.6857958", "0.6650118", "0.652399", "0.6440963", "0.64121723", "0.6306234", "0.62717485", "0.62601966", "0.62601346", "0.6242067", "0.62301815", "0.62156785", "0.6205138", "0.61483353", "0.61483353", "0.6128637", "0.6101594", "0.60817", "0.605676", "0.59793603", "0.59590703", "0.59398943", "0.5936238", "0.586579", "0.58639866", "0.58246017", "0.58240336", "0.5803933", "0.5801371", "0.5781672", "0.5780901", "0.57747877", "0.57498735", "0.5741924", "0.57336235", "0.572322", "0.5713161", "0.56736183", "0.5658528", "0.5657782", "0.56536335", "0.5652201", "0.5633632", "0.5632947", "0.5632194", "0.56149095", "0.5605441", "0.55956995", "0.5583202", "0.5582736", "0.5574801", "0.5560282", "0.5553154", "0.5537046", "0.55266106", "0.55223", "0.55186564", "0.551855", "0.5496062", "0.54812104", "0.54761505", "0.5473976", "0.5462784", "0.54591954", "0.54511005", "0.54441017", "0.543925", "0.5438033", "0.54374456", "0.5428149", "0.54250616", "0.5423555", "0.5418924", "0.5415457", "0.5409078", "0.5405038", "0.5404789", "0.54040325", "0.54038614", "0.54032034", "0.5403176", "0.5403103", "0.5402774", "0.54024017", "0.5389038", "0.53864", "0.53841573", "0.53817755", "0.53810275", "0.5379714", "0.537778", "0.53775567", "0.5376486", "0.53719354", "0.5371691", "0.5370933", "0.5369757", "0.53677464", "0.5361305", "0.5359156", "0.53576016" ]
0.0
-1
This method generates a header file containing the data contained in the numpy array provided. It is used to capture the tensor data (for both inputs and expected outputs) to be bundled into the standalone application.
def _create_header_file(tensor_name, npy_data, output_path, data_linkage): file_path = pathlib.Path(f"{output_path}/" + tensor_name).resolve() # create header file raw_path = file_path.with_suffix(".h").resolve() with open(raw_path, "w") as header_file: header_file.write("#include <stddef.h>\n") header_file.write("#include <stdint.h>\n") header_file.write("#include <dlpack/dlpack.h>\n") header_file.write(f"const size_t {tensor_name}_len = {npy_data.size};\n") _emit_data_linkage(header_file, data_linkage) header_file.write(f"{NP_TYPE_TO_C[str(npy_data.dtype)]} {tensor_name}[] =") header_file.write("{") for i in np.ndindex(npy_data.shape): header_file.write(f"{npy_data[i]}, ") header_file.write("};\n\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_header(_metadata, rename_padding=False):\n template = \"\"\"\\\n VERSION {version}\n FIELDS {fields}\n SIZE {size}\n TYPE {type}\n COUNT {count}\n WIDTH {width}\n HEIGHT {height}\n VIEWPOINT {viewpoint}\n POINTS {points}\n DATA {data}\n \"\"\"\n str_metadata = _metadata.copy()\n\n if not rename_padding:\n str_metadata['fields'] = ' '.join(_metadata['fields'])\n else:\n new_fields = []\n for f in _metadata['fields']:\n if f == '_':\n new_fields.append('padding')\n else:\n new_fields.append(f)\n str_metadata['fields'] = ' '.join(new_fields)\n str_metadata['size'] = ' '.join(map(str, _metadata['size']))\n str_metadata['type'] = ' '.join(_metadata['type'])\n str_metadata['count'] = ' '.join(map(str, _metadata['count']))\n str_metadata['width'] = str(_metadata['width'])\n str_metadata['height'] = str(_metadata['height'])\n str_metadata['viewpoint'] = ' '.join(map(str, _metadata['viewpoint']))\n str_metadata['points'] = str(_metadata['points'])\n tmpl = template.format(**str_metadata)\n return tmpl", "def generate_header(value_type, num_elements, element_multiplier, imag, name_length, name):\n result = []\n\n result += Ensemble.int32_to_bytes(value_type) # Value Type\n result += Ensemble.int32_to_bytes(num_elements) # Number of elements\n result += Ensemble.int32_to_bytes(element_multiplier) # Element Multiplier\n result += Ensemble.int32_to_bytes(imag) # Image\n result += Ensemble.int32_to_bytes(name_length) # Name Length\n result += name.encode() # Name\n\n return result", "def generateNHDRHeader(self, inputFile):\r\n\r\n logging.info('Processing started')\r\n #initialize PCR object\r\n imagePCRFile = PCRDataObject()\r\n #import image parameters of PCR object\r\n imagePCRFile.ImportFromFile(inputFile)\r\n\r\n filePathName, fileExtension = os.path.splitext(inputFile)\r\n #The directory of the .nhdr file\r\n nhdrPathName = filePathName + \".nhdr\"\r\n\r\n if fileExtension == \".pcr\":\r\n if imagePCRFile.form == 1 or imagePCRFile.form == 5 or imagePCRFile.form == 10:\r\n with open(nhdrPathName, \"w\") as headerFile:\r\n headerFile.write(\"NRRD0004\\n\")\r\n headerFile.write(\"# Complete NRRD file format specification at:\\n\")\r\n headerFile.write(\"# http://teem.sourceforge.net/nrrd/format.html\\n\")\r\n if imagePCRFile.form == 5:\r\n headerFile.write(\"type: ushort\\n\")\r\n elif imagePCRFile.form == 10:\r\n headerFile.write(\"type: float\\n\")\r\n elif imagePCRFile.form == 1:\r\n headerFile.write(\"type: uchar\\n\")\r\n headerFile.write(\"dimension: 3\\n\")\r\n headerFile.write(\"space: left-posterior-superior\\n\")\r\n sizeX = imagePCRFile.X\r\n sizeY = imagePCRFile.Y\r\n sizeZ = imagePCRFile.Z\r\n headerFile.write(f\"sizes: {sizeX} {sizeY} {sizeZ}\\n\")\r\n volSpace = imagePCRFile.voxelSize\r\n headerFile.write(f\"space directions: ({volSpace}, 0.0, 0.0) (0.0, {volSpace}, 0.0) (0.0, 0.0, {volSpace})\\n\")\r\n headerFile.write(\"kinds: domain domain domain\\n\")\r\n headerFile.write(\"endian: little\\n\")\r\n headerFile.write(\"encoding: raw\\n\")\r\n headerFile.write(\"space origin: (0.0, 0.0, 0.0)\\n\")\r\n volPathName = filePathName + \".vol\"\r\n volPathSplit = []\r\n volPathSplit = volPathName.split('/')\r\n volFileName = volPathSplit[len(volPathSplit)-1]\r\n headerFile.write(f\"data file: {volFileName}\\n\")\r\n # print(imagePCRFile.form)\r\n print(f\".nhdr file path is: {nhdrPathName}\")\r\n #Automatically loading .vol file using the generated .nhdr file.\r\n if os.path.exists(volPathName):\r\n slicer.util.loadVolume(nhdrPathName)\r\n print(f\"{volFileName} 
loaded\\n\")\r\n else:\r\n print(f\"{volFileName} is not in the same directory\\n\")\r\n else:\r\n print(\"The format of this dataset is currently not supported by this module. Currently only float (format=10), unsigned 16 bit integer (format=5) and unsigned 8 bit integer (format=1) data types are supported. Please contact us with this dataset to enable this data type.\")\r\n else:\r\n print(\"This is not a PCR file, please re-select a PCR file\")", "def write_cpp_header(self):\n prefix = \"#include <frc/controller/\"\n headers = []\n headers.append(prefix + self.plant_coeffs_header + \".h>\")\n headers.append(prefix + self.ctrl_coeffs_header + \".h>\")\n headers.append(prefix + self.obsv_coeffs_header + \".h>\")\n headers.append(prefix + self.loop_header + \".h>\")\n\n with open(\n self.class_name + \"Coeffs.\" + self.header_extension, \"w\"\n ) as header_file:\n print(\"#pragma once\" + os.linesep, file=header_file)\n for header in sorted(headers):\n print(header, file=header_file)\n header_file.write(os.linesep)\n self.__write_cpp_func_name(\n header_file, self.plant_coeffs_type, \"PlantCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.ctrl_coeffs_type, \"ControllerCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.obsv_coeffs_type, \"ObserverCoeffs\", in_header=True\n )\n self.__write_cpp_func_name(\n header_file, self.loop_type, \"Loop\", in_header=True\n )", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.txt\", \"w\")\n file.write(str(\"\\t\".join(hdata)) + \"\\n\")", "def IIR_sos_header(fname_out,SOS_mat):\r\n Ns,Mcol = SOS_mat.shape\r\n f = open(fname_out,'wt')\r\n f.write('//define a IIR SOS CMSIS-DSP coefficient array\\n\\n')\r\n f.write('#include <stdint.h>\\n\\n')\r\n f.write('#ifndef STAGES\\n')\r\n f.write('#define STAGES %d\\n' % Ns)\r\n f.write('#endif\\n')\r\n f.write('/*********************************************************/\\n');\r\n f.write('/* IIR SOS Filter Coefficients */\\n');\r\n f.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... 
by stage\\n' % (5*Ns))\r\n for k in range(Ns):\r\n if (k < Ns-1):\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e,\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n else:\r\n f.write(' %+-13e, %+-13e, %+-13e,\\n' % \\\r\n (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n f.write(' %+-13e, %+-13e\\n' % \\\r\n (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # for k in range(Ns):\r\n # if (k < Ns-1):\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f,\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n # else:\r\n # f.write(' %15.12f, %15.12f, %15.12f,\\n' % \\\r\n # (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))\r\n # f.write(' %15.12f, %15.12f\\n' % \\\r\n # (-SOS_mat[k,4],-SOS_mat[k,5]))\r\n f.write('};\\n')\r\n f.write('/*********************************************************/\\n')\r\n f.close()", "def write_header(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"w+\")\r\n self.file.write(self.version)\r\n for list_item in self.list_of_header_objects:\r\n self.file.write(list_item.line)\r\n self.file.write(self.body_header_line.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_header_objects:\r\n print(list_item.line)\r\n print(self.body_header_line.line)", "def generate_header():\n header_file = AUTOGEN_WARNING\n header_file += \"/// /file atomic_nuclear_data.h\\n\"\n header_file += \"/// /author Andrew Davis (andrew.davis@wisc.edu)\\n\"\n header_file += \"///\\n\"\n header_file += (\n \"/// /brief Implements all the fundamental atomic & nuclear data data\\n\"\n )\n header_file += \"#include <map>\\n\"\n header_file += \"\\n\"\n header_file += \"namespace pyne\\n\"\n header_file += \"{\\n\"\n header_file += (\n \" /// main function to be called when you wish to load the nuclide data \\n\"\n )\n header_file += \" /// into memory \\n\"\n header_file += \" void _load_atomic_mass_map_memory();\\n\"\n header_file += \" /// function to create mapping from nuclides in id form\\n\"\n header_file += \" /// to their atomic masses\\n\"\n header_file += \" \\n\"\n header_file += \" void _insert_atomic_mass_map();\\n\"\n header_file += \" \\n\"\n header_file += \" /// function to create mapping from nuclides in id form \\n\"\n header_file += \" /// to their natural abundances\\n\"\n header_file += \" void _insert_abund_map();\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to their natural abundances\\n\"\n )\n header_file += \" extern std::map<int,double> natural_abund_map;\\n\"\n header_file += \" \\n\"\n header_file += \" /// Mapping from nuclides in id form to their atomic masses.\\n\"\n header_file += \" extern std::map<int,double> atomic_mass_map;\\n\"\n header_file += \" \\n\"\n header_file += (\n \" /// Mapping from nuclides in id form to the associated error in \\n\"\n )\n header_file += \" /// abdundance \\n\"\n header_file += \" extern std::map<int,double> atomic_mass_error_map;\\n\"\n header_file += \"} // namespace pyne\\n\"\n return header_file", "def generateHeader(param_dict, filename_out, test_mode=False, template=\"uvfits_headers/header.tpl\"):\n findAndReplace(param_dict, template,filename_out, test_mode)", "def write_sim_header_data(self, output):\n for section in output:\n if section[0] == Datatype.title:\n self._writer.writerow([section[1]])\n if section[0] == Datatype.param_list:\n for field in section[1]:\n self._writer.writerow([field[0], 
field[1]])", "def _write_header(self, header):\n # write out telescope and source information\n header[\"latitude\"] = self.telescope_location_lat_lon_alt_degrees[0]\n header[\"longitude\"] = self.telescope_location_lat_lon_alt_degrees[1]\n header[\"altitude\"] = self.telescope_location_lat_lon_alt_degrees[2]\n header[\"telescope_name\"] = np.string_(self.telescope_name)\n header[\"instrument\"] = np.string_(self.instrument)\n header[\"object_name\"] = np.string_(self.object_name)\n\n # write out required UVParameters\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"Nfreqs\"] = self.Nfreqs\n header[\"Npols\"] = self.Npols\n header[\"Nspws\"] = self.Nspws\n header[\"Ntimes\"] = self.Ntimes\n header[\"antenna_numbers\"] = self.antenna_numbers\n header[\"uvw_array\"] = self.uvw_array\n header[\"vis_units\"] = np.string_(self.vis_units)\n header[\"channel_width\"] = self.channel_width\n header[\"time_array\"] = self.time_array\n header[\"freq_array\"] = self.freq_array\n header[\"integration_time\"] = self.integration_time\n header[\"lst_array\"] = self.lst_array\n header[\"polarization_array\"] = self.polarization_array\n header[\"spw_array\"] = self.spw_array\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"antenna_positions\"] = self.antenna_positions\n\n # handle antenna_names; works for lists or arrays\n header[\"antenna_names\"] = np.asarray(self.antenna_names, dtype=\"bytes\")\n\n # write out phasing information\n header[\"phase_type\"] = np.string_(self.phase_type)\n if self.phase_center_ra is not None:\n header[\"phase_center_ra\"] = self.phase_center_ra\n if self.phase_center_dec is not None:\n header[\"phase_center_dec\"] = self.phase_center_dec\n if self.phase_center_epoch is not None:\n header[\"phase_center_epoch\"] = self.phase_center_epoch\n if self.phase_center_frame is not None:\n header[\"phase_center_frame\"] = np.string_(self.phase_center_frame)\n\n # write out optional parameters\n if self.dut1 is not None:\n header[\"dut1\"] = self.dut1\n if self.earth_omega is not None:\n header[\"earth_omega\"] = self.earth_omega\n if self.gst0 is not None:\n header[\"gst0\"] = self.gst0\n if self.rdate is not None:\n header[\"rdate\"] = np.string_(self.rdate)\n if self.timesys is not None:\n header[\"timesys\"] = np.string_(self.timesys)\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n if self.blt_order is not None:\n header[\"blt_order\"] = np.string_(\", \".join(self.blt_order))\n if self.antenna_diameters is not None:\n header[\"antenna_diameters\"] = self.antenna_diameters\n if self.uvplane_reference_time is not None:\n header[\"uvplane_reference_time\"] = self.uvplane_reference_time\n if self.eq_coeffs is not None:\n header[\"eq_coeffs\"] = self.eq_coeffs\n if self.eq_coeffs_convention is not None:\n header[\"eq_coeffs_convention\"] = np.string_(self.eq_coeffs_convention)\n\n # write out extra keywords if it exists and has elements\n if self.extra_keywords:\n extra_keywords = header.create_group(\"extra_keywords\")\n for k in self.extra_keywords.keys():\n if isinstance(self.extra_keywords[k], str):\n extra_keywords[k] = np.string_(self.extra_keywords[k])\n else:\n extra_keywords[k] = self.extra_keywords[k]\n\n # write out history\n header[\"history\"] = np.string_(self.history)\n\n return", "def writeHeader( self ):\n for k in self.secondaryTargets.keys():\n 
fileName = self.treyGene[k] + \"-GenesinCommon.txt\" \n with open( fileName, 'w' ) as out:\n out.write(\"%s\\t%s\\t%s\\n\" %(\"Gene_trey\", \"Gene\", \"Gene_inCommon\" ))\n out.close()", "def create_header(analysis_outdir, metadata, rg_dict, specimen_dict, logger=default_logger):\n\n rgid = rg_dict[\"ID\"].replace(\".\", \"_\")\n header = \"%s/header-%s.sam\" %(analysis_outdir, rg_dict[\"ID\"])\n header_file = open(header, \"w\")\n header_file.write(\"@HD\\tVN:1.4\\n\")\n PI_STR = \"\"\n if len(rg_dict[\"PI\"]):\n PI_STR=\"PI:%s\\t\" % (rg_dict[\"PI\"])\n header_file.write(\"@RG\\tID:%s:%s\\tCN:%s\\tPL:%s\\tPM:%s\\tLB:%s:%s:%s\\t%sSM:%s\\tPU:%s:%s\\tDT:%s\\n\"\n %(metadata[\"center_name\"], rgid,metadata[\"center_name\"], metadata[\"platform\"],metadata[\"platform_model\"], metadata[\"seqtype\"],\n metadata[\"center_name\"], rg_dict[\"LB\"], PI_STR, metadata[\"aliquot_id\"], rg_dict[\"CN\"], rg_dict[\"PU\"], getUTCDate(rg_dict[\"DT\"])))\n header_file.write(\"@CO\\tdcc_project_code:%s-US\\n\" %metadata[\"disease\"])\n header_file.write(\"@CO\\tsubmitter_donor_id:%s\\n\" %metadata[\"participant_id\"])\n header_file.write(\"@CO\\tsubmitter_specimen_id:%s\\n\" %metadata[\"sample_id\"])\n header_file.write(\"@CO\\tsubmitter_sample_id:%s\\n\" %metadata[\"aliquot_id\"])\n\n if metadata[\"sample_type\"] not in specimen_dict:\n msg = \"sample_type %s not found in specimen mapping\" % metadata[\"sample_type\"]\n logger.error(msg)\n if not FORCE_RUN:\n raise HeaderException(msg)\n\n if \"sample_type\" in metadata and metadata[\"sample_type\"] in specimen_dict:\n (icgc_type, sample_class) = specimen_dict[metadata[\"sample_type\"]]\n else:\n icgc_type = \"unknown\"\n sample_class = \"unknown\"\n\n #Sanity check about use_cntl\n if \"use_cntl\" in metadata:\n if metadata[\"use_cntl\"] == \"N/A\" and sample_class == \"tumour\":\n msg = \"Tumour sample requires use_cntl, set to %s. Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n if sample_class == \"normal\" and metadata[\"use_cntl\"] != \"N/A\":\n msg = \"Normal sample requires N/A use_cntl, set to %s. 
Are your IDs in the wrong order?\" % metadata[\"use_cntl\"]\n logger.error(msg)\n raise HeaderException(msg)\n\n header_file.write(\"@CO\\tdcc_specimen_type:%s\\n\" % icgc_type)\n header_file.write(\"@CO\\tuse_cntl:%s\\n\" %(metadata.get(\"use_cntl\", \"NA\")))\n header_file.close()\n return header", "def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n \n fileobj.write(csv_line( ['Notes'] + [x.name for x in self.angles] + ['Wait For/n', 'Value'] ) )", "def get_header(self, root):\n header = etree.SubElement(root, \"FileHeader\")\n header.set(\"revMajor\", \"1\")\n header.set(\"revMinor\", \"0\")\n header.set(\"date\", datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\"))\n header.set(\"description\", \"Generated OpenSCENARIO File\")\n header.set(\"author\", \"QGIS OSCGenerator Plugin\")", "def FIR_header(fname_out,h):\r\n M = len(h)\r\n N = 3 # Coefficients per line\r\n f = open(fname_out,'wt')\r\n f.write('//define a FIR coefficient Array\\n\\n')\r\n f.write('#include <stdint.h>\\n\\n')\r\n f.write('#ifndef M_FIR\\n')\r\n f.write('#define M_FIR %d\\n' % M)\r\n f.write('#endif\\n')\r\n f.write('/************************************************************************/\\n');\r\n f.write('/* FIR Filter Coefficients */\\n');\r\n f.write('float32_t h_FIR[M_FIR] = {')\r\n kk = 0;\r\n for k in range(M):\r\n #k_mod = k % M\r\n if (kk < N-1) and (k < M-1):\r\n f.write('%15.12f,' % h[k])\r\n kk += 1\r\n elif (kk == N-1) & (k < M-1):\r\n f.write('%15.12f,\\n' % h[k])\r\n if k < M:\r\n f.write(' ')\r\n kk = 0\r\n else:\r\n f.write('%15.12f' % h[k]) \r\n f.write('};\\n')\r\n f.write('/************************************************************************/\\n')\r\n f.close()", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def edf_write(data, file_name, header_size=1024):\n # get current time\n from time import gmtime, strftime\n today = strftime('%d-%b-%Y', gmtime())\n size = np.shape(data)\n print('data size in pixels is ', size)\n nbytes = np.prod(size) * data.dtype.itemsize\n print('opening', file_name, 'for writing')\n # craft an ascii header of the appropriate size\n f = open(file_name, 'wb')\n head = '{\\n'\n head += 'HeaderID = EH:000001:000000:000000 ;\\n'\n head += 'Image = 1 ;\\n'\n head += 'ByteOrder = LowByteFirst ;\\n'\n head += 'DataType = %13s;\\n' % numpy_to_esrf_datatype(data.dtype)\n print('using data type %s' % numpy_to_esrf_datatype(data.dtype))\n head += 'Dim_1 = %4s;\\n' % size[0]\n if len(size) > 1: head += 'Dim_2 = %4s;\\n' % size[1]\n if len(size) > 2: head += 'Dim_3 = %4s;\\n' % 
size[2]\n head += 'Size = %9s;\\n' % nbytes\n head += 'Date = ' + today + ' ;\\n'\n for i in range(header_size - len(head) - 2):\n head += ' '\n head += '}\\n'\n f.write(head.encode('utf-8'))\n if len(data.shape) == 3:\n s = np.ravel(data.transpose(2, 1, 0)).tostring()\n elif len(data.shape) == 2:\n s = np.ravel(data.transpose(1, 0)).tostring()\n else:\n s = np.ravel(data).tostring()\n f.write(s)\n f.close()", "def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n fileobj.write(csv_line( ['Comment'] + [x.name.lower() for x in self.angles] + ['Wait For', 'Value'] ) )", "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def generate_headerfile(template, n_division=10000, df=6, start_chi=25, filepath=\"Chi2PLookup.h\", verbose=False):\n divisor = \"const int Chi2PLookup::divisor = {};\".format(n_division)\n\n names = []\n cutoff = []\n p_values_arrays = []\n degrees_of_freedom = range(1, df+1)\n\n if verbose:\n print(\"Generating p-value arrays...\")\n print(\" df={}\".format(df))\n print(\" precision={}\".format(n_division))\n\n for df in degrees_of_freedom:\n var_name = \"pValues_{}\".format(df)\n names.append(var_name)\n max_chi = max_chi_value(df=df, start_chi=start_chi)\n cutoff.append(max_chi)\n n_elements = max_chi * n_division\n\n chi_values = (val / n_division for val in range(0, n_elements + 1))\n p_values = (str(1 - chi2.cdf(val, df)) for val in chi_values)\n\n if verbose:\n print(\"\\tAdding p-values array to template for degree of freedom = {} ...\".format(df))\n\n p_values_arrays.append(\"double {}[] = {{{}}};\".format(var_name, \", \".join(p_values)))\n\n cutoff_array = \"const int Chi2PLookup::cutoff[] = {{{}}};\".format(\", \".join([str(i) for i in cutoff]))\n p_values_array_of_arrays = \"const double * Chi2PLookup::pValues[] = {{{}}};\\n\".format(\", \".join(names))\n\n template = template.format(divisor, cutoff_array, \"\\n\".join(p_values_arrays), p_values_array_of_arrays)\n\n if verbose:\n print(\"Saving file to: {}\".format(os.path.abspath(filepath)))\n\n with open(filepath, \"w\") as outfile:\n outfile.write(template)\n\n return template", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def WriteHeader(self):\n return", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.html\", \"w\")\n file.write(\"<html>\\n\\t<head>\\n\\t<style>\\n\" +\n \"\\t\\t\\ttable, th, td {border: 1px solid\\n\" +\n \"\\t\\t\\tblack;border-collapse: collapse;}\" +\n \"\\n\\t</style>\\n\" +\n \"\\t</head>\\n\\t<body>\\n\\t\\t<table style=\\\"width:100%\\\">\\n\")\n file.write(\"\\t\\t\\t<tr>\\n\")\n for line in hdata:\n file.write(\n \"\\t\\t\\t\\t\\t<th>\\n\\t\\t\\t\\t\\t\\t\"\n + str(line) + \"\\n\\t\\t\\t\\t\\t</th>\\n\")\n file.write(\"\\t\\t\\t</tr>\\n\")", "def make_header(args):\n header = os.path.join(args.output_dir,'header.sam')\n args.header = header\n header_handle = open(header,'w')\n header_handle.write('@HD\\tVN:1.4\\n')\n joined_sam = open(os.path.join(args.output_dir, 'watson_joinedAligned.out.sam'))\n merged_sam = open(os.path.join(args.output_dir, 'watson_mergedAligned.out.sam'))\n for line in joined_sam:\n if line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n else:\n break\n for line in merged_sam:\n if 
line.startswith('@'):\n if line.startswith('@SQ'):\n header_handle.write(line)\n elif not line.startswith('@HD'):\n header_handle.write(line)\n else:\n break\n header_handle.close()\n in_files = {'header':os.path.join(args.output_dir,'header.sam')}\n addRG(in_files, args)\n return args", "def generate_output_file(data, extension, headers):\n output_data = _replace_boolean(data)\n output_name = _generate_output_name(extension)\n with open(output_name, 'a', newline='') as file:\n _file_writer(file, extension, output_data, headers)", "def _write_header(self):\n # The last line here must not have a trailing \\n\n self.buffer.write_line(\"def template(self, __io, model=None):\")\n self.buffer.scope_line(\"view = self\")", "def write_header(filename, data, lima):\n\tfrom utilities import file_type\n\tfrom EMAN2db import db_open_dict\n\n\tftp = file_type(filename)\n\tif ftp == \"bdb\":\n\t\tDB = db_open_dict(filename)\n\t\tDB.set_header(lima, data)\n\telif ftp == \"hdf\":\n\t\tdata.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)\n\telse:\n\t\tERROR(\"Unacceptable file format\",\"write_headers\",1)", "def writePOVRAYHeader(self, fh):\n settings = self.mainWindow.preferences.povrayForm\n\n focalPoint = self.camera.GetFocalPoint()\n campos = self.camera.GetPosition()\n viewup = self.camera.GetViewUp()\n angle = settings.viewAngle\n if settings.shadowless:\n shadowless = \"shadowless \"\n else:\n shadowless = \"\"\n\n if self.parent.blackBackground:\n rval = gval = bval = 0\n else:\n rval = gval = bval = 1\n\n fh.write(\"camera { perspective location <%f,%f,%f>\\n\" % (- campos[0], campos[1], campos[2]))\n fh.write(\" look_at <%f,%f,%f>\\n\" % (- focalPoint[0], focalPoint[1], focalPoint[2]))\n fh.write(\" angle %f\\n\" % angle)\n fh.write(\" sky <%f,%f,%f> }\\n\" % (- viewup[0], viewup[1], viewup[2]))\n fh.write(\"light_source { <%f,%f,%f> color rgb <1,1,1> %s }\\n\" % (- campos[0], campos[1], campos[2], shadowless))\n fh.write(\"background { color rgb <%f,%f,%f> }\\n\" % (rval, gval, bval))", "def _writeVariablesHeaderSection(self):\n self.header.write(wrapLine(\"NV\", self.annotation, self.delimiter, \"%d\\n\" % self.NV))\n self.header.write(wrapLine(\"VSCAL\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VSCAL)))\n self.header.write(wrapLine(\"VMISS\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VMISS)))\n self.header.write(wrapLines(\"VNAME\", self.annotation, self.delimiter, \"%s\\n\" * self.NV % tuple(self.VNAME)))", "def make_header_files():\n os.makedirs(DATA_DIR) if not os.path.exists(DATA_DIR) else None\n from dkistdataratemodel.units import frame\n from dkist_data_model.generator.dataproducts.visp import CalibratedVISP\n\n \"\"\"\n Generate VISP\n \"\"\"\n visp = CalibratedVISP(end_condition=20*frame)\n\n visp_files = visp.to_fits(\"sp_5_labelled\",\n path_template=os.path.join(DATA_DIR, 'visp_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"visp.zip\"), \"w\") as myzip:\n for fname in visp_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)\n\n \"\"\"\n Generate VTF\n \"\"\"\n from dkist_data_model.generator.dataproducts.vtf import CalibratedVTF\n vtf = CalibratedVTF(end_condition=96*frame)\n\n vtf_files = vtf.to_fits(\"5d_test\",\n path_template=os.path.join(DATA_DIR, 'vtf_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"vtf.zip\"), \"w\") as myzip:\n for fname in vtf_files:\n myzip.write(fname, 
os.path.split(fname)[1])\n os.remove(fname)", "def write_to_carray(num_matrix, array_name, file_path):\n var_start = \"unsigned short int \" + array_name + \" [] = {\\n\\t\"\n var_end = \"};\\n\"\n var_values = [] \n for row in num_matrix:\n for cell in row:\n var_values.append(str(cell))\n \n f = open(file_path, \"w\")\n f.write(\"// Auto generated image header file\\n\")\n f.write(var_start)\n f.write(\",\\n\\t\".join(var_values) + \"\\n\")\n f.write(var_end)\n f.close()", "def write_header(self, fd):\n fd.write(f\"BEGIN {self.name}\")\n if len(self.data_items) > 0:\n if isinstance(self.data_items[0], mfdatascalar.MFScalar):\n one_based = (\n self.data_items[0].structure.type == DatumType.integer\n )\n entry = self.data_items[0].get_file_entry(\n values_only=True, one_based=one_based\n )\n else:\n entry = self.data_items[0].get_file_entry()\n fd.write(str(entry.rstrip()))\n if len(self.data_items) > 1:\n for data_item in self.data_items[1:]:\n entry = data_item.get_file_entry(values_only=True)\n fd.write(\"%s\" % (entry.rstrip()))\n if self.get_comment().text:\n fd.write(\" \")\n self.get_comment().write(fd)\n fd.write(\"\\n\")", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of 
change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def writeTableHeader(self, fileName, variant=0):\r\n # research\r\n w = slicer.modules.NeedleFinderWidget\r\n l = w.logic\r\n if not variant:\r\n l.exportEvaluation(['user','case','maxTipHD','maxHD', 'avgHD', 'stdHD', 'medHD',\r\n 'nNeedles','nOutliers','outliers',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n 'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)\r\n else:\r\n l.exportEvaluation(['user','case','tipHD','HD', 'man.-seg_', 'ID1', 'ID2',\r\n 'outlier?',\r\n 'radiusNeedle',\r\n 'lenghtNeedle',\r\n 'radiusMax',\r\n 'numberOfPointsPerNeedle',\r\n 'nbRotatingIterations',\r\n 'stepSize',\r\n 'gradientPonderation',\r\n 'exponent',\r\n 'gaussianAttenuationButton',\r\n 'sigma',\r\n 'algoV',\r\n #'case',\r\n t.strftime(\"%d/%m/%Y\"), t.strftime(\"%H:%M:%S\")\r\n ], fileName)", "def WriteHeader(self, output_mediator): # pylint: disable=unused-argument\n return", "def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines", "def write_file(self,filename):\n \n with open(filename, 'w') as f:\n tab_width = np.max([len(k) for k in self.header.keys()])\n for k,v in self.header.items():\n f.write(u'{0}:\\t{1}\\n'.format(k, v).encode('utf8').expandtabs(tab_width+2))\n np.savetxt(f, self.data, fmt ='%f %f %f %d')", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.csv\", \"w\")\n file.write(str(\",\".join(hdata)) + \"\\n\")", "def make_odb_header(odbfile, dataset):\n \n header = 'headers/' + dataset + '_header.dat'\n \n if not os.path.isfile ( header ):\n print(' Creating the header file for the dataset: ', dataset )\n if dataset in ('era5_1','era5_2'):\n \n odbfile = odbfile.replace('.gz','')\n else:\n odbfile = odbfile.replace('.gz','').replace('.conv._','.conv.')\n \n rdata=subprocess.check_output([\"odb\",\"header\", odbfile ])\n \n with open( header , 'wb' ) as f:\n f.write(rdata) \n \n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n') \n \n else:\n f = open(header , 'rb')\n rdata=f.read()\n rdata=rdata.decode('utf-8').split('\\n')\n #print(' Done reading the existing header file for the dataset: ', dataset )\n \n columns, kinds, tdict =[] , [] , {} \n \n for r in rdata[2:-2]:\n try:\n \n if r[:6]=='Header':\n break\n else: \n columns.append(r.split('name: ')[1].split(',')[0])\n kinds.append(r.split('type: ')[1].split(',')[0])\n if kinds[-1]=='REAL':\n tdict[columns[-1]]=numpy.float32\n elif 'INTEGER' in kinds[-1] or 'BITFIELD' in kinds[-1]:\n #print(columns[-1])\n if columns[-1]=='sonde_type@conv' or columns[-1]=='station_type@conv':\n tdict[columns[-1]]=numpy.float32\n else: \n 
tdict[columns[-1]]=numpy.int32\n else:\n tdict[columns[-1]]=numpy.dtype('S') # dict containng column name and type\n \n except IndexError:\n pass \n \n \"\"\" This is done otherwise for the era5 databases (1759,1761,3188) the tdict has different length than the columns list.\n So the following call alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) breaks \"\"\" \n for t in tdict.keys():\n if t not in columns:\n #print(\"Removing non appearing fb column: \" , c) \n del tdict[t]\n \n \"\"\" These values must be removed rom the fb, since they have NULL values and it creates problem with \n alldict=pd.read_csv(f,delimiter='\\t', usecols=columns, quoting=3,comment='#', skipinitialspace=True, dtype=tdict) \"\"\" \n \n if dataset in [\"era5_1759\", \"era5_1761\", \"era5_3188\"]:\n remove = ['sonde_type@conv' , \"eda_spread@errstat\", \"bias_volatility@body\" , \"timeseries_index@conv\"]\n for c in remove:\n #print(\"Removing wrong fb column: \" , c)\n try:\n columns.remove(c)\n del tdict[c]\n except:\n pass\n return columns, kinds, tdict", "def ROCKSTAR_binary():\n header_size = 256 #Bytes, size of the header\n halo_struct_size = 264 #Bytes, properties stored for one halo using dtype structure dt (260 from struct 'halo' in halo.h from ROCKSTAR and \n #4 bytes probably from max_metric from struct 'extra_halo_info' in halo.h)\n bytes_to_header_info = 64 #bytes until the header info starts\n \n dt_header_info = [ \n ('n_halos' , np.int64), #total number of halos in this file\n ('tot_n_particles' , np.int64), #total number of particles in this file \n ('box_size' , np.float32), #side lenght in Mpc/h of simulation box\n ('m_particles' , np.float32), #mass of one particle in h-1Msun\n ('type_particles' , np.int64) #type of particle (either 1=halo, star, gas etc.) 
\n ]\n \n dt = [\n ('haloid' , np.int64), #int64_t id\n ('x_pos' , np.float32), #float pos[6], 1\n ('y_pos' , np.float32), #float pos[6], 2\n ('z_pos' , np.float32), #float pos[6], 3\n ('pos4' , np.float32), #float pos[6], 4\n ('pos5' , np.float32), #float pos[6], 5\n ('pos6' , np.float32), #float pos[6], 6 \n ('x_corevel' , np.float32), #float corevel[3], 1\n ('y_corevel' , np.float32), #float corevel[3], 2\n ('z_corevel' , np.float32), #float corevel[3], 3 \n ('x_vel_bulk' , np.float32), #float bulkvel[3], 1\n ('y_vel_bulk' , np.float32), #float bulkvel[3], 2\n ('z_vel_bulk' , np.float32), #float bulkvel[3], 3\n ('mhalo' , np.float32), #float m \n ('rvir' , np.float32), #float r \n ('rvir_child' , np.float32), #float child_r\n ('vmax_r' , np.float32), #float vmax_r\n ('mhalo_bound' , np.float32), #float mgrav\n ('vmax' , np.float32), #float vmax\n ('vpeak' , np.float32), #float rvmax\n ('rscale' , np.float32), #float rs\n ('rscale_Klypin' , np.float32), #float klypin_rs\n ('vrms' , np.float32), #float vrms\n ('x_ang' , np.float32), #float J[3], 1\n ('y_ang' , np.float32), #float J[3], 2\n ('z_ang' , np.float32), #float J[3], 3\n ('energy' , np.float32), #float energy \n ('spinParameter' , np.float32), #float spin\n ('mhalo_200b' , np.float32), #float alt_m[4], 1 \n ('mhalo_200c' , np.float32), #float alt_m[4], 2 \n ('mhalo_500c' , np.float32), #float alt_m[4], 3 \n ('mhalo_2500c' , np.float32), #float alt_m[4], 4 \n ('x_off' , np.float32), #float Xoff\n ('v_off' , np.float32), #float Voff\n ('b_to_a' , np.float32), #float b_to_a \n ('c_to_a' , np.float32), #float c_to_a\n ('x_a' , np.float32), #float A[3], 1\n ('y_a' , np.float32), #float A[3], 2\n ('z_a' , np.float32), #float A[3], 3 \n ('b_to_a_500c' , np.float32), #float b_to_a2\n ('c_to_a_500c' , np.float32), #float c_to_a2\n ('x_a_500c' , np.float32), #float A2[3], 1 \n ('y_a_500c' , np.float32), #float A2[3], 2\n ('z_a_500c' , np.float32), #float A2[3], 3 \n ('spin_Bullock' , np.float32), #float bullock_spin\n ('T_U' , np.float32), #float kin_to_pot\n ('Mpseudo_Behroozi', np.float32), #float m_pe_b \n ('Mpseudo_Diemer' , np.float32), #float m_pe_d\n ('rhalf_mass' , np.float32), #float halfmass_radius\n ('n_particles' , np.int64), #int64_t num_p\n ('n_particles_child', np.int64), #int64_t num_child_particles \n ('p_start' , np.int64), #int64_t p_start\n ('descIndex' , np.int64), #int64_t desc\n ('flags' , np.int64), #int64_t flags\n ('n_core' , np.int64), #int64_t n_core\n ('PosUncertainty' , np.float32), #float min_pos_err\n ('VelUncertainty' , np.float32), #float min_vel_err\n ('BulkVelUnc' , np.float32), #float min_bulkvel_err\n ('mmetric' , np.float32) #unclear where it comes from, it might be mmetric \n ]\n \n return header_size, halo_struct_size, dt, dt_header_info, bytes_to_header_info", "def _create_hdr_obj(self, pix_len, pix_scale):\n hdr = astropy.io.fits.Header()\n hdr['NAXIS'] = 2\n hdr['NAXIS1'] = pix_len\n hdr['NAXIS2'] = pix_len\n hdr['CTYPE1'] = 'RA---TAN'\n hdr['CRVAL1'] = float(self.ra_ctr)\n hdr['CRPIX1'] = (pix_len / 2.) * 1.\n hdr['CDELT1'] = -1.0 * pix_scale\n hdr['CTYPE2'] = 'DEC--TAN'\n hdr['CRVAL2'] = float(self.dec_ctr)\n hdr['CRPIX2'] = (pix_len / 2.) 
* 1.\n hdr['CDELT2'] = pix_scale\n hdr['EQUINOX'] = 2000\n return hdr", "def write_header(outfbfile, header_params, header):\n for hp in header_params:\n hdrval = sigproc.addto_hdr(hp, header[hp])\n outfbfile.write(hdrval)", "def write_annotations(self, output_file):\n logging.info(self._header)\n np.savetxt(output_file, self._zeroes, header=\" \".join(self._header),fmt='%i',comments='')", "def _generate_header_files(self):\n return True", "def write_header_input(units, sigma, freq, mu_r, ndec, out_inp):\n out_inp.writelines(\"*Input to calculate current distribution in a wire\\n\")\n out_inp.writelines(\"*Conductivity\" + \"\\n\")\n # sigma_str = '%.2E' % sigma\n freq_int = freq * mu_r\n freq_int_str = '%.2E' % freq_int # this makes FH use the effective frequency\n\n # out_inp.writelines(\".Default \"+ \"sigma=\"+sigma_str+\"\\n\")\n out_inp.writelines(\".Units \" + units + \"\\n\")\n out_inp.writelines(\"*Frequency range of interest\" + \"\\n\")\n out_inp.writelines(\"*Note that this is an effective frequency to include FMs\" + \"\\n\")\n out_inp.writelines(\".freq \" + \"fmin=\" + freq_int_str + \" fmax=\" + freq_int_str + \" ndec=\"\n + str(ndec) + \"\\n\")\n out_inp.close()", "def WriteHeaderFileForSrnModel(filename, model): \n\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(srn_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(srn_model_name + \".hpp written!\\n\")", "def generate_headers(self):\n raise NotImplementedError()", "def create_xif_header(dataset_name: str) -> str:\n xif_rule = \"\"\n xif_rule += f\"[MODEL: dataset={dataset_name}]\\n\"\n xif_rule += \"alter\\n\"\n return xif_rule", "def write_uvh5_part(\n self,\n filename,\n data_array,\n flag_array,\n nsample_array,\n check_header=True,\n antenna_nums=None,\n antenna_names=None,\n ant_str=None,\n bls=None,\n frequencies=None,\n freq_chans=None,\n times=None,\n time_range=None,\n polarizations=None,\n blt_inds=None,\n run_check_acceptability=True,\n add_to_history=None,\n ):\n # check that the file already exists\n if not os.path.exists(filename):\n raise AssertionError(\n \"{0} does not exists; please first initialize it with \"\n \"initialize_uvh5_file\".format(filename)\n )\n\n if check_header:\n self._check_header(\n filename, run_check_acceptability=run_check_acceptability\n )\n\n # figure out which \"full file\" indices to write data to\n blt_inds, freq_inds, pol_inds, _ = self._select_preprocess(\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n polarizations,\n blt_inds,\n )\n\n # make sure that the dimensions of the data to write are correct\n if data_array.shape != flag_array.shape:\n raise AssertionError(\"data_array and 
flag_array must have the same shape\")\n if data_array.shape != nsample_array.shape:\n raise AssertionError(\n \"data_array and nsample_array must have the same shape\"\n )\n\n # check what part of each dimension to grab\n # we can use numpy slice objects to index the h5py indices\n if blt_inds is not None:\n Nblts = len(blt_inds)\n\n # test if blts are regularly spaced\n if len(set(np.ediff1d(blt_inds))) <= 1:\n blt_reg_spaced = True\n blt_start = blt_inds[0]\n blt_end = blt_inds[-1] + 1\n if len(blt_inds) == 1:\n d_blt = 1\n else:\n d_blt = blt_inds[1] - blt_inds[0]\n blt_inds = np.s_[blt_start:blt_end:d_blt]\n else:\n blt_reg_spaced = False\n else:\n Nblts = self.Nblts\n blt_reg_spaced = True\n blt_inds = np.s_[:]\n if freq_inds is not None:\n Nfreqs = len(freq_inds)\n\n # test if frequencies are regularly spaced\n if len(set(np.ediff1d(freq_inds))) <= 1:\n freq_reg_spaced = True\n freq_start = freq_inds[0]\n freq_end = freq_inds[-1] + 1\n if len(freq_inds) == 1:\n d_freq = 1\n else:\n d_freq = freq_inds[1] - freq_inds[0]\n freq_inds = np.s_[freq_start:freq_end:d_freq]\n else:\n freq_reg_spaced = False\n else:\n Nfreqs = self.Nfreqs\n freq_reg_spaced = True\n freq_inds = np.s_[:]\n if pol_inds is not None:\n Npols = len(pol_inds)\n\n # test if pols are regularly spaced\n if len(set(np.ediff1d(pol_inds))) <= 1:\n pol_reg_spaced = True\n pol_start = pol_inds[0]\n pol_end = pol_inds[-1] + 1\n if len(pol_inds) == 1:\n d_pol = 1\n else:\n d_pol = pol_inds[1] - pol_inds[0]\n pol_inds = np.s_[pol_start:pol_end:d_pol]\n else:\n pol_reg_spaced = False\n else:\n Npols = self.Npols\n pol_reg_spaced = True\n pol_inds = np.s_[:]\n\n # check for proper size of input arrays\n proper_shape = (Nblts, 1, Nfreqs, Npols)\n if data_array.shape != proper_shape:\n raise AssertionError(\n \"data_array has shape {0}; was expecting {1}\".format(\n data_array.shape, proper_shape\n )\n )\n\n # actually write the data\n with h5py.File(filename, \"r+\") as f:\n dgrp = f[\"/Data\"]\n visdata_dset = dgrp[\"visdata\"]\n flags_dset = dgrp[\"flags\"]\n nsamples_dset = dgrp[\"nsamples\"]\n visdata_dtype = visdata_dset.dtype\n if visdata_dtype not in (\"complex64\", \"complex128\"):\n custom_dtype = True\n else:\n custom_dtype = False\n\n # check if we can do fancy indexing\n # as long as at least 2 out of 3 axes can be written as slices,\n # we can be fancy\n n_reg_spaced = np.count_nonzero(\n [blt_reg_spaced, freq_reg_spaced, pol_reg_spaced]\n )\n if n_reg_spaced >= 2:\n if custom_dtype:\n indices = (blt_inds, np.s_[:], freq_inds, pol_inds)\n _write_complex_astype(data_array, visdata_dset, indices)\n else:\n visdata_dset[blt_inds, :, freq_inds, pol_inds] = data_array\n flags_dset[blt_inds, :, freq_inds, pol_inds] = flag_array\n nsamples_dset[blt_inds, :, freq_inds, pol_inds] = nsample_array\n elif n_reg_spaced == 1:\n # figure out which axis is regularly spaced\n if blt_reg_spaced:\n for ifreq, freq_idx in enumerate(freq_inds):\n for ipol, pol_idx in enumerate(pol_inds):\n if custom_dtype:\n indices = (blt_inds, np.s_[:], freq_idx, pol_idx)\n _write_complex_astype(\n data_array[:, :, ifreq, ipol], visdata_dset, indices\n )\n else:\n visdata_dset[\n blt_inds, :, freq_idx, pol_idx\n ] = data_array[:, :, ifreq, ipol]\n flags_dset[blt_inds, :, freq_idx, pol_idx] = flag_array[\n :, :, ifreq, ipol\n ]\n nsamples_dset[\n blt_inds, :, freq_idx, pol_idx\n ] = nsample_array[:, :, ifreq, ipol]\n elif freq_reg_spaced:\n for iblt, blt_idx in enumerate(blt_inds):\n for ipol, pol_idx in enumerate(pol_inds):\n if custom_dtype:\n indices 
= (blt_idx, np.s_[:], freq_inds, pol_idx)\n _write_complex_astype(\n data_array[iblt, :, :, ipol], visdata_dset, indices\n )\n else:\n visdata_dset[\n blt_idx, :, freq_inds, pol_idx\n ] = data_array[iblt, :, :, ipol]\n flags_dset[blt_idx, :, freq_inds, pol_idx] = flag_array[\n iblt, :, :, ipol\n ]\n nsamples_dset[\n blt_idx, :, freq_inds, pol_idx\n ] = nsample_array[iblt, :, :, ipol]\n else: # pol_reg_spaced\n for iblt, blt_idx in enumerate(blt_inds):\n for ifreq, freq_idx in enumerate(freq_inds):\n if custom_dtype:\n indices = (blt_idx, np.s_[:], freq_idx, pol_inds)\n _write_complex_astype(\n data_array[iblt, :, ifreq, :], visdata_dset, indices\n )\n else:\n visdata_dset[\n blt_idx, :, freq_idx, pol_inds\n ] = data_array[iblt, :, ifreq, :]\n flags_dset[blt_idx, :, freq_idx, pol_inds] = flag_array[\n iblt, :, ifreq, :\n ]\n nsamples_dset[\n blt_idx, :, freq_idx, pol_inds\n ] = nsample_array[iblt, :, ifreq, :]\n else:\n # all axes irregularly spaced\n # perform a triple loop -- probably very slow!\n for iblt, blt_idx in enumerate(blt_inds):\n for ifreq, freq_idx in enumerate(freq_inds):\n for ipol, pol_idx in enumerate(pol_inds):\n if custom_dtype:\n indices = (blt_idx, np.s_[:], freq_idx, pol_idx)\n _write_complex_astype(\n data_array[iblt, :, ifreq, ipol],\n visdata_dset,\n indices,\n )\n else:\n visdata_dset[\n blt_idx, :, freq_idx, pol_idx\n ] = data_array[iblt, :, ifreq, ipol]\n flags_dset[blt_idx, :, freq_idx, pol_idx] = flag_array[\n iblt, :, ifreq, ipol\n ]\n nsamples_dset[\n blt_idx, :, freq_idx, pol_idx\n ] = nsample_array[iblt, :, ifreq, ipol]\n\n # append to history if desired\n if add_to_history is not None:\n history = np.string_(self.history) + np.string_(add_to_history)\n if \"history\" in f[\"Header\"]:\n # erase dataset first b/c it has fixed-length string datatype\n del f[\"Header\"][\"history\"]\n f[\"Header\"][\"history\"] = np.string_(history)\n\n return", "def _write_header(self):\n msg = self._write_executive_control_deck()\n msg += self._write_case_control_deck()\n return msg", "def writeheader(filename, header):\n # convert string to [unsigned] byte array\n hh = np.zeros(512, dtype='uint8')\n for i, ss in enumerate(header):\n hh[i] = ord(ss)\n # write header to file\n file_arr = np.memmap(filename, dtype='uint8', mode='r+', shape=(512,))\n file_arr[:512] = hh[:]\n del file_arr\n return", "def write_header(self, *, version=3.01, file_type='O: Observation', satellite_type='M: Mixed GNSS',\n run_by='GPSLiDAR', organization='CCAR', observer='Adam Dodge', agency='CCAR', receiver_num='1',\n receiver_type='GENERIC_P1', receiver_vers='1.0.0', antenna_number=1, antenna_type='RTK2-F9P',\n delta_pos=[0,0,0]):\n markerstr = 'GPS LiDAR System at ' + self.longname\n if not os.path.isfile(self.fname):\n tstr = self.t.strftime('%Y%m%d %H%M%S')\n # TODO: Fix header (not working in readers)\n r = 6371000 + self.alt\n x = r * np.cos(self.lat * np.pi/180) * np.cos(self.lon * np.pi/180)\n y = r * np.cos(self.lat * np.pi/180) * np.sin(self.lon * np.pi/180)\n z = r * np.sin(self.lat * np.pi/180)\n header = f'{version:>9.2f}{\" \":<11s}{file_type:<20s}{satellite_type:<20s}{\"RINEX VERSION / TYPE\":<20s}\\n' + \\\n f'{run_by:<20s}{organization:<20s}{tstr:<16s}UTC {\"PGM / RUN BY / DATE\":<20s}\\n' + \\\n f'{markerstr:<60}{\"MARKER NAME\":<20s}\\n' + \\\n f'{self.station:<60}{\"MARKER NUMBER\":<20s}\\n' + \\\n f'{\"GEODETIC\":<20s}{\" \":40s}{\"MARKER TYPE\":<20s}\\n' + \\\n f'{observer:<20}{agency:<40}{\"OBSERVER / AGENCY\":<20s}\\n' + \\\n 
f'{receiver_num:<20}{receiver_type:<20}{receiver_vers:<20}{\"REC # / TYPE / VERS\":<20s}\\n' + \\\n f'{antenna_number:<20}{antenna_type:<40s}{\"ANT # / TYPE\":<20s}\\n' + \\\n f'{x:14.4f}{y:>14.4f}{z:>14.4f}{\" \":18s}{\"APPROX POSITION XYZ\":<20s}\\n' + \\\n f'{delta_pos[0]:14.4f}{delta_pos[1]:>14.4f}{delta_pos[2]:>14.4f}{\" \":18s}{\"ANTENNA: DELTA H/E/N\":<20s}\\n' + \\\n f'G {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'R {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'E {8:<3d} C1 L1 D1 S1 C2 L2 D2 S2 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'S {8:<3d} C1 L1 D1 S1 C5 L5 D5 S5 {\"SYS / # / OBS TYPES\":<20s}\\n' + \\\n f'{\"DBHZ\":<60s}{\"SIGNAL STRENGTH UNIT\":<20s}\\n' + \\\n f'{self.t.year:>6d}{self.t.month:>6d}{self.t.day:>6d}{self.t.hour:>6d}{self.t.minute:>6d}' + \\\n f'{self.t.second:>13.7f} UTC{\" \":<9s}{\"TIME OF FIRST OBS\":<20s}\\n' + \\\n f' 0{\" \":54s}{\"RCV CLOCK OFFS APPL\":<20s}\\n' + \\\n f'G{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'R{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'E{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'S{\" \":<59}{\"SYS / PHASE SHIFTS\":<20s}\\n' + \\\n f'{self.leapS:>6d}{\" \":>54s}{\"LEAP SECONDS\":<20s}\\n' + \\\n f'{\" \":>60s}{\"END OF HEADER\":<20s}\\n'\n\n try:\n with open(self.fname, 'w') as f:\n f.write(header)\n except FileNotFoundError:\n print('Data directory is bad. Try again.')\n sys.exit(0)", "def get_export_header(self):\n\n name = self.get_name()\n\n if (self.name == \"input::nodes\"):\n\n name = \"user-specified\"\n\n grp_string = self.get_grp_string()\n\n if grp_string != \"\":\n\n grp_string = \" \" + grp_string\n\n return \"\\n!*!Label \" + self.path[1] + \" ..\" + grp_string + \" .. \" + name + \"\\n\"", "def print_header(module, fd):\n module_name = str(module.arg)\n header = OrderedDict()\n header['swagger'] = '2.0'\n header['info'] = {\n 'description': '%s API generated from %s' % (\n module_name, module.pos.ref.rsplit('/')[-1]),\n 'version': '1.0.0',\n 'title': str(module_name + ' API')\n }\n header['host'] = 'localhost:8080'\n # TODO: introduce flexible base path. (CLI options?)\n header['basePath'] = '/restconf'\n header['schemes'] = ['http']\n return header", "def gen_model_header(env: jinja2.environment.Environment, model: onnx.ModelProto) -> str:\n header_template = env.get_template(\"model_header.dml.jinja\")\n header_infos = dict()\n\n header_infos[\"ir_version\"] = model.ir_version\n opset_import = list()\n for opset in model.opset_import:\n if len(opset.domain) == 0:\n opset.domain = \"ONNX\"\n opset_import.append(opset.domain + \"/\" + str(opset.version))\n header_infos[\"producer_name\"] = model.producer_name\n header_infos[\"producer_version\"] = model.producer_version\n header_infos[\"domain\"] = model.domain\n header_infos[\"model_version\"] = model.model_version\n header_infos[\"doc_string\"] = model.doc_string\n metadata_props = [[prop.key, prop.vale] for prop in model.metadata_props]\n\n model_header_render = header_template.render(\n header_components=header_infos,\n opset_import=opset_import,\n metadata_props=metadata_props\n )\n return model_header_render", "def generate_header(gene, variant):\n return '>{}_{}'.format(gene, variant)", "def gen_header(cmd_list):\n\ts = \"/* Warning: This file is automatically generated. Do not modify. 
*/\\n\"\n\ts += \"#ifndef COMMGEN_H\\n\"\n\ts += \"#define COMMGEN_H\\n\\n\"\n\ts += \"#ifdef __cplusplus\\n\"\n\ts += \"extern \\\"C\\\" {\\n\"\n\ts += \"#endif\\n\\n\"\n\ts += \"#include <stdint.h>\\n\\n\"\n\ts += gen_struct_def(cmd_list)\n\ts += \"/* To avoid the volatile qualifier being a pain in the ass, the main loop\\n\"\n\ts += \" * accesses the DataReal struct through this pointer. */\\n\"\n\ts += \"extern volatile struct comm_data_t *Data;\\n\\n\"\n\ts += \"/* Parse a packet, update the struct, and send a reply. */\\n\"\n\ts += \"void parse_packet(uint8_t *buf, uint16_t count);\\n\\n\"\t\n\tfor c in cmd_list:\n\t\ts += gen_send_proto(c) + \"\\n\"\n\t\ts + gen_parse_proto(c) + \"\\n\"\n\ts += gen_packing_protos()\n\ts += gen_build_str_dec()\n\t#s += \"void send_packet(uint8_t *data, uint16_t count);\\n\\n\"\n\ts += \"#ifdef __cplusplus\\n\"\n\ts += \"}\\n\"\n\ts += \"#endif\\n\\n\"\t\n\ts += \"#endif\\n\"\n\treturn s", "def _get_numpy_headers(directory):\n sys.path.insert(0, directory)\n import numpy\n\n include_dir = os.path.relpath(numpy.get_include(), directory)\n sys.path.pop(0)\n return \"\"\"\ncc_library(\n name = \"headers\",\n hdrs = glob([\"{include_dir}/**/*.h\"]),\n includes = [\"{include_dir}\"],\n)\n\"\"\".format(\n include_dir=include_dir\n )", "def _create_hdr_output(self, size_degrees, pixel_scale, factor=1):\n pix_len = int(np.ceil(size_degrees * factor / pixel_scale))\n hdr = self._create_hdr_obj(pix_len, pixel_scale)\n ri_targ, di_targ = self._make_axes(hdr)\n sz_out = ri_targ.shape\n outim = ri_targ * np.nan\n\n prihdu = astropy.io.fits.PrimaryHDU(data=outim, header=hdr)\n target_hdr = prihdu.header\n\n suff = '_template.hdr'\n if factor != 1:\n suff = suff.replace('.hdr', '_ext.hdr')\n header_file = os.path.join(self.gal_dir, self.name + suff)\n self.write_headerfile(header_file, target_hdr)\n\n return target_hdr, header_file", "def binary_out(array, fnam, dt=np.dtype(np.float64), endianness='big', appendDim=False):\r\n if appendDim == True :\r\n fnam_out = fnam + '_'\r\n for i in array.shape[:-1] :\r\n fnam_out += str(i) + 'x' \r\n fnam_out += str(array.shape[-1]) + '.raw'\r\n else :\r\n fnam_out = fnam\r\n arrayout = np.array(array, dtype=dt)\r\n if sys.byteorder != endianness:\r\n arrayout.byteswap(True)\r\n arrayout.tofile(os.path.abspath(fnam_out))", "def writeHeading(fil, nodes, elems, text=''): #currently only for hexahedral mesh\n fil.write(\" CONTROL INFO 2.2.30\\n\")\n fil.write(\"** GAMBIT NEUTRAL FILE\\n\")\n fil.write('%s\\n' %text)\n fil.write('PROGRAM: Gambit VERSION: 2.2.30\\n')\n fil.write(strftime('%d %b %Y %H:%M:%S\\n', gmtime()))\n fil.write(' NUMNP NELEM NGRPS NBSETS NDFCD NDFVL\\n')\n fil.write('%10i%10i%10i%10i%10i%10i\\n' % (shape(nodes)[0],shape(elems)[0],1,0,3,3))\n fil.write('ENDOFSECTION\\n')", "def write_cpp_source(self):\n if len(self.header_path_prefix) > 0 and self.header_path_prefix[-1] != os.sep:\n self.header_path_prefix += os.sep\n\n with open(self.class_name + \"Coeffs.cpp\", \"w\") as source_file:\n print(\n '#include \"'\n + self.header_path_prefix\n + self.class_name\n + \"Coeffs.\"\n + self.header_extension\n + '\"'\n + os.linesep,\n file=source_file,\n )\n print(\"#include <Eigen/Core>\" + os.linesep, file=source_file)\n\n # Write MakePlantCoeffs()\n self.__write_cpp_func_name(\n source_file, self.plant_coeffs_type, \"PlantCoeffs\", in_header=False\n )\n if self.period_variant:\n self.__write_cpp_matrix(source_file, self.system.sysc.A, \"Acontinuous\")\n self.__write_cpp_matrix(source_file, self.system.sysc.B, 
\"Bcontinuous\")\n self.__write_cpp_matrix(source_file, self.system.sysd.C, \"C\")\n self.__write_cpp_matrix(source_file, self.system.sysd.D, \"D\")\n print(\n \" return \"\n + self.plant_coeffs_type\n + \"(Acontinuous, Bcontinuous, C, D);\",\n file=source_file,\n )\n else:\n self.__write_cpp_matrix(source_file, self.system.sysd.A, \"A\")\n self.__write_cpp_matrix(source_file, self.system.sysd.B, \"B\")\n self.__write_cpp_matrix(source_file, self.system.sysd.C, \"C\")\n self.__write_cpp_matrix(source_file, self.system.sysd.D, \"D\")\n print(\n \" return \" + self.plant_coeffs_type + \"(A, B, C, D);\",\n file=source_file,\n )\n print(\"}\" + os.linesep, file=source_file)\n\n # Write MakeControllerCoeffs()\n self.__write_cpp_func_name(\n source_file, self.ctrl_coeffs_type, \"ControllerCoeffs\", in_header=False\n )\n self.__write_cpp_matrix(source_file, self.system.K, \"K\")\n self.__write_cpp_matrix(source_file, self.system.Kff, \"Kff\")\n self.__write_cpp_matrix(source_file, self.system.u_min, \"Umin\")\n self.__write_cpp_matrix(source_file, self.system.u_max, \"Umax\")\n print(\n \" return \" + self.ctrl_coeffs_type + \"(K, Kff, Umin, Umax);\",\n file=source_file,\n )\n print(\"}\" + os.linesep, file=source_file)\n\n # Write MakeObserverCoeffs()\n self.__write_cpp_func_name(\n source_file, self.obsv_coeffs_type, \"ObserverCoeffs\", in_header=False\n )\n if self.period_variant:\n self.__write_cpp_matrix(source_file, self.system.Q, \"Qcontinuous\")\n self.__write_cpp_matrix(source_file, self.system.R, \"Rcontinuous\")\n self.__write_cpp_matrix(\n source_file, self.system.P_steady, \"PsteadyState\"\n )\n\n first_line_prefix = \" return \" + self.obsv_coeffs_type + \"(\"\n space_prefix = \" \" * len(first_line_prefix)\n print(first_line_prefix + \"Qcontinuous, Rcontinuous,\", file=source_file)\n print(space_prefix + \"PsteadyState);\", file=source_file)\n else:\n self.__write_cpp_matrix(source_file, self.system.kalman_gain, \"K\")\n print(\" return \" + self.obsv_coeffs_type + \"(K);\", file=source_file)\n print(\"}\" + os.linesep, file=source_file)\n\n # Write MakeLoop()\n self.__write_cpp_func_name(\n source_file, self.loop_type, \"Loop\", in_header=False\n )\n first_line_prefix = \" return \" + self.loop_type + \"(\"\n space_prefix = \" \" * len(first_line_prefix)\n print(\n first_line_prefix + \"Make\" + self.class_name + \"PlantCoeffs(),\",\n file=source_file,\n )\n print(\n space_prefix + \"Make\" + self.class_name + \"ControllerCoeffs(),\",\n file=source_file,\n )\n print(\n space_prefix + \"Make\" + self.class_name + \"ObserverCoeffs());\",\n file=source_file,\n )\n print(\"}\", file=source_file)", "def write_data(self, filename,\n columns=('Q', 'R', 'dR'),\n header=None):\n if header is None:\n header = \"# %s\\n\"%' '.join(columns)\n with open(filename, 'wb') as fid:\n fid.write(asbytes(header))\n data = np.vstack([getattr(self, c) for c in columns])\n np.savetxt(fid, data.T)", "def _write_gen_header(self, Index=False, FLAGS=None):\n if FLAGS is None:\n FLAGS = []\n FTEXT, FHCRC, FEXTRA, FNAME = 1, 2, 4, 8 # extra field bit flags\n current_time = int(time.time())\n time_byte = struct.pack(\"<L\", current_time)\n self.generic_header[\"DATE\"] = time_byte\n if Index:\n self.generic_header[\"FLAGS\"] = b\"\\x10\"\n if FLAGS is not None:\n if \"FTEXT\" in FLAGS:\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FTEXT\n\n if \"FHCRC\" in FLAGS:\n header_crc32 = 0\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FHCRC\n for byte in 
self.generic_header.values():\n header_crc32 = zlib.crc32(byte, header_crc32)\n\n if \"FEXTRA\" in FLAGS:\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FEXTRA\n\n if \"FNAME\" in FLAGS:\n self.generic_header[\"FLAGS\"] = self.generic_header[\"FLAGS\"] & FNAME\n\n for value in self.generic_header.values():\n self.file_out.write(value)\n if \"FEXTRA\" in FLAGS:\n # WRITE EXTRA FIELD\n pass\n\n if \"FNAME\" in FLAGS:\n # WRITE FNAME FIELD\n fName = self.file_name.split(\"/\")[-1]\n\n if Index:\n self.generic_header[\"FLAGS\"] = b\"\\x00\"\n self.file_out.write(self.index_magic_bytes)\n self.file_out.write(struct.pack(\"<B\", self.max_idx_len))\n self.file_out.write(struct.pack(\"<B\", self.max_offset_len))\n self.index_offset = self.file_out.tell()\n self._allocate_index_bytes()\n\n if \"FHCRC\" in FLAGS:\n # WRITE checksum for header\n pass\n\n return self.file_out.tell()", "def test_writeHeader(self):\n output = StringIO()\n self.builder._writeHeader(output, \"Super Awesometastic 32.16\")\n self.assertEquals(\n output.getvalue(),\n \"Super Awesometastic 32.16\\n\"\n \"=========================\\n\"\n \"\\n\")", "def get_processed_data_file_header_structure(for_training = False):\n\n\tfrom preprocessor.convert_frames_to_episodes import get_output_column_order\n\n\theader = get_output_column_order()\n\tif for_training:\n\t\theader.append(get_training_label_header())\n\treturn header", "def WriteHeaderFileForCcmModel(filename, model): \n\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n header_file = open(ccm_model_name + \".hpp\", 'w')\n\n #Define the header files\n header_file_defn = GetHeaderFileDefinitionString(filename, model)\n header_file.write(header_file_defn)\n\n #Include the appropriate files\n include_files = GetIncludedFilesForHeaderString()\n header_file.write(include_files)\n\n #Define the ODE System class\n ode_class = GetOdeClassDefinitionString(filename, model)\n header_file.write(ode_class)\n\n #Define the serialization\n serialization = GetSerializationInformationString(filename)\n header_file.write(serialization)\n\n #Define the SRN model\n srn_model_defn = GetModelDefinitionString(filename, model, True)\n header_file.write(srn_model_defn)\n\n #Close the file\n header_close = GetHeaderFileClosingString(filename, model)\n header_file.write(header_close)\n\n header_file.close()\n\n print(ccm_model_name + \".hpp written!\\n\")", "def create_headers(*, magnetic_regions, model_name):\n date_str = dt.datetime.now().strftime(\"%d-%b-%Y\")\n\n header_mf = (f\"% Geomagnetic Virtual Observatory Model, file created on: {date_str}\\n\"\n \"% PID_OBA_SUB\\n\"\n \"% Grid solution: EQ\\n\"\n \"% Swarm data used\\n\"\n \"% Data time used: all\\n\"\n \"% Include external field correction: yes\\n\"\n \"% Crustal field corrections used\\n\"\n \"% Potential spatial degree: cubic\\n\"\n \"% Search radius: 700\\n\"\n \"% Target point altitude: 490\\n\"\n \"% Inversion limit: 30\\n\"\n \"% \\n\"\n \"% PCA:\\n\"\n f\"% SV detrended using {model_name}\\n\"\n \"% QD lat min | QD lat max | # PC removed\\n\"\n f\"% {magnetic_regions['1']['min_mag_lat']} {magnetic_regions['1']['max_mag_lat']} {magnetic_regions['1']['proxy_number']}\\n\"\n f\"% {magnetic_regions['2']['min_mag_lat']} {magnetic_regions['2']['max_mag_lat']} {magnetic_regions['2']['proxy_number']}\\n\"\n f\"% {magnetic_regions['3']['min_mag_lat']} {magnetic_regions['3']['max_mag_lat']} {magnetic_regions['3']['proxy_number']}\\n\"\n f\"% 
{magnetic_regions['4']['min_mag_lat']} {magnetic_regions['4']['max_mag_lat']} {magnetic_regions['4']['proxy_number']}\\n\"\n f\"% {magnetic_regions['5']['min_mag_lat']} {magnetic_regions['5']['max_mag_lat']} {magnetic_regions['5']['proxy_number']}\\n\"\n \"% \\n\"\n \"% theta | phi | Year Month | Time | r | B_r B_theta B_phi | sigma_r sigma_theta sigma_phi | N_{data} |\\n\"\n \"% [deg] | [deg] | | [mjd2000] | [km] | Predicted field - [nT] | Estimated error [nT] | # data |\\n\"\n \"% \\n\")\n\n header_sv = (f\"% Geomagnetic Virtual Observatory Model, file created on: {date_str}\\n\"\n \"% PID_OBA_SUB\\n\"\n \"% Grid solution: EQ\\n\"\n \"% Swarm data used\\n\"\n \"% Data time used: all\\n\"\n \"% Include external field correction: yes\\n\"\n \"% Crustal field corrections used\\n\"\n \"% Potential spatial degree: cubic\\n\"\n \"% Search radius: 700\\n\"\n \"% Target point altitude: 490\\n\"\n \"% Inversion limit: 30\\n\"\n \"% \\n\"\n \"% PCA:\\n\"\n f\"% SV detrended using {model_name}\\n\"\n \"% QD lat min | QD lat max | # PC removed\\n\"\n f\"% {magnetic_regions['1']['min_mag_lat']} {magnetic_regions['1']['max_mag_lat']} {magnetic_regions['1']['proxy_number']}\\n\"\n f\"% {magnetic_regions['2']['min_mag_lat']} {magnetic_regions['2']['max_mag_lat']} {magnetic_regions['2']['proxy_number']}\\n\"\n f\"% {magnetic_regions['3']['min_mag_lat']} {magnetic_regions['3']['max_mag_lat']} {magnetic_regions['3']['proxy_number']}\\n\"\n f\"% {magnetic_regions['4']['min_mag_lat']} {magnetic_regions['4']['max_mag_lat']} {magnetic_regions['4']['proxy_number']}\\n\"\n f\"% {magnetic_regions['5']['min_mag_lat']} {magnetic_regions['5']['max_mag_lat']} {magnetic_regions['5']['proxy_number']}\\n\"\n \"% \\n\"\n \"% theta | phi | Year Month | Time | r | dB_r dB_theta dB_phi | sigma_r sigma_theta sigma_phi | N_{data} |\\n\"\n \"% [deg] | [deg] | | [mjd2000] | [km] | Predicted field - [nT/yr] | Estimated error [nT/yr] | # data |\\n\"\n \"% \\n\")\n return header_mf, header_sv", "def generate_header_from_declarations(function_declarations, verbose=True):\n header = \"\"\n for (f_name, (f_dims, f_dict)) in function_declarations.iteritems():\n s = header_from_function_name_and_args(f_name, f_dict[\"args\"])\n header += s + \"\\n\"\n\n return header", "def data_file(self, *, expand=False, contract=False):\n # Rely on SuperDataObj to proof expand/contract args\n # Extra empty string at the end puts a newline at the end\n # of the generated string, consistent with files\n # generated by Sphinx.\n return \"\\n\".join(\n (\n self.header_preamble,\n self.header_project.format(project=self.project),\n self.header_version.format(version=self.version),\n self.header_zlib,\n *(\n obj.data_line(expand=expand, contract=contract)\n for obj in self.objects\n ),\n \"\",\n )\n ).encode(\"utf-8\")", "def WriteHeader(self, output_mediator):\n self.WriteText('{')\n self._event_counter = 0", "def writeHeader(self):\n try:\n saveBuf = self.buffer\n self.buffer = []\n ## This export format needs the RDF namespace to be defined, add a\n ## prefix for it if there isn't one yet.\n self.setNamespace(\"rdf\", RDF.NAMESPACE, False)\n self.write(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\")\n self.writeStartOfStartTag(RDF.NAMESPACE, \"RDF\")\n for name, prefix in self.namespaceTable.iteritems():\n self.writeNewLine()\n self.writeIndent()\n self.write(\"xmlns\")\n if len(prefix) > 0: \n self.write(':')\n self.write(prefix) \n self.write(\"=\\\"\")\n self.write(xmlutil.escapeDoubleQuotedAttValue(name))\n self.write(\"\\\"\")\n 
self.writeEndOfStartTag()\n self.writeNewLine()\n finally: \n self.headerBuffer = self.buffer\n self.buffer = saveBuf\n self.headerWritten = True", "def generate_data(self, file_name, data, header=None):\n with open(f'{self.path_file}/{file_name}.csv', 'w') as csvfile:\n if header:\n csvfile.write(header)\n csvfile.writelines(data)\n return True", "def CosmicFish_write_header(name):\n\n print\n print \"**************************************************************\"\n print \" _____ _ _____ __ \"\n print \" / ___/__ ___ __ _ (_)___/ __(_)__ / / \"\n print \" / /__/ _ \\(_-</ ' \\/ / __/ _// (_-</ _ \\ \"\n print \" \\___/\\___/___/_/_/_/_/\\__/_/ /_/___/_//_/ Py Lib\"\n print \" \"\n print \"**************************************************************\"\n print name\n print \" This application was developed using the CosmicFish code.\"\n print \"**************************************************************\"\n print", "def _writeAuxVariablesHeaderSection(self):\n self.header.write(wrapLine(\"NAUXV\", self.annotation, self.delimiter, \"%d\\n\" % self.NAUXV))\n if self.NAUXV > 0:\n line = ((\"%s\" + self.delimiter) * (self.NAUXV - 1) + \"%s\\n\") % tuple(self.ASCAL)\n self.header.write(wrapLine(\"ASCAL\", self.annotation, self.delimiter, line))\n line = ((\"%s\" + self.delimiter) * (self.NAUXV - 1) + \"%s\\n\") % tuple(self.AMISS)\n self.header.write(wrapLine(\"AMISS\", self.annotation, self.delimiter, line))\n line = \"%s\\n\" * self.NAUXV % tuple(self.ANAME)\n self.header.write(wrapLines(\"ANAME\", self.annotation, self.delimiter, line))", "def add_header(header, filename, i):\n with open(filename, 'r+') as f:\n content = f.readlines()\n content[0] = header\n f.seek(0,0)\n f.write(f'<!-- Generated with XMLGenerator.py {__ver__} | {get_app_name(i)} -->\\n')\n f.writelines(content)", "def header(self):\n ...", "def create_cfile_head(self):\n head = \"\"\"#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n#include \"../init_array_lib/init_dyn_array.h\"\n#include \"../pips_lib/define_script.h\"\n\n\nint main(int argc, const char* argv[])\n{\n srand(time(NULL));\n \"\"\"\n\n self.append_text_to_file(str(head))", "def make_roi_header(**param):\n hdr_list = ['== Integration ROI ==']\n method = [i for i in list(param.keys()) if \"pos\" in i][0].split('_pos')[0]\n hdr_list.append('Integration method: {}'.format(method))\n\n for k, v in list(param.items()):\n hdr_list.append('{}: {}'.format(k, v))\n\n header = \"\\n\".join(['# ' + i for i in hdr_list])\n return header", "def save_header_default(filename, nhalos_per_tree):\n ntrees = len(nhalos_per_tree)\n nhalos = np.sum(nhalos_per_tree)\n dtype1 = np.dtype([('ntrees', 'i4'), ('totnhalos', 'i4')])\n x1 = np.array([(ntrees, nhalos)], dtype=dtype1)\n x2 = nhalos_per_tree.astype('i4')\n header_size = x1.nbytes + x2.nbytes\n # Open\n if isinstance(filename, str):\n fd = open(filename, 'wb')\n close = True\n else:\n fd = filename\n close = False\n # Write\n x1.tofile(fd)\n x2.tofile(fd)\n # Close\n if close:\n fd.close()\n return header_size", "def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)", "def make_table_header(table_file, function_list):\n\tstring = '%10s' 
%(\" \")\n\tfor function in function_list:\n\t\tstring += \"\\t\"+'%32s' %(\"f_\"+str(function-1))\n\ttable_file.write(string+\"\\n\")\n\tstring = '%10s' %(\" \")+ \"\\t\" + \"------------\"*4*len(function_list)\n\ttable_file.write(string+\"\\n\")\n\treturn None", "def create_header(numValues):\n\n header = []\n for value in range(numValues):\n header.append(\"att{}\".format(value))\n return header", "def generate_header(self, config):\n\n file_name = '{1}{0}.h'.format(self.class_name, config.objc_prefix)\n file_path = _OBJC_BUILD_PATH + string_utils.cpp_group_name_to_objc_group_name(self.group_name) + '/' + file_name\n output_header = open(file_path, 'w')\n\n output_header.write('#import <Foundation/Foundation.h>')\n output_header.write(_OBJC_BR)\n\n for objc_enum in self.objc_enum_list:\n output_header.write(objc_enum.generate_objc_enum(self.class_name, config))\n output_header.write(_OBJC_BR)\n\n output_header.write('NS_ASSUME_NONNULL_BEGIN\\n@interface {1}{0} : NSObject'.format(self.class_name,\n config.objc_prefix))\n output_header.write(_OBJC_BR)\n\n for objc_var in self.objc_var_list:\n output_header.write(objc_var.property(config))\n output_header.write(_OBJC_BR)\n\n output_header.write('@end\\nNS_ASSUME_NONNULL_END')\n output_header.write(_OBJC_BR)", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = 
np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def create_header(self, 
tables: List[Dict], schema: bool = False) -> str:\n header = \"\"\n if \"func\" in self.state:\n header += gt.sql_alchemy_func_import + \"\\n\"\n if self.postgresql_dialect_cols:\n header += (\n gt.postgresql_dialect_import.format(\n types=\",\".join(self.postgresql_dialect_cols)\n )\n + \"\\n\"\n )\n if self.constraint:\n header += gt.unique_cons_import + \"\\n\"\n if self.im_index:\n header += gt.index_import + \"\\n\"\n if schema and tables[0].table_schema:\n schema = tables[0].table_schema.replace('\"', \"\")\n header += \"\\n\" + gt.gino_init_schema.format(schema=schema)\n else:\n header += \"\\n\" + gt.gino_init\n return header", "def print_header(data, names, delim):\n ncol = len(names)\n for j, n in enumerate(names):\n if data[n][0].ndim > 0:\n nv = data[n][0].size\n for k in range(nv):\n np = '%s%d' % (n, k)\n print_val(np)\n if k < (nv-1):\n stdout.write('%s' % delim)\n else:\n print_val(n)\n if j < (ncol-1):\n stdout.write('%s' % delim)\n stdout.write('\\n')", "def tofits(outfilename, pixelarray, hdr=None, verbose=True):\n # print \"LOGX:: Entering `tofits` method/function in %(__file__)s\" %\n # globals()\n pixelarrayshape = pixelarray.shape\n if verbose:\n print(\"FITS export shape : (%i, %i)\" % (pixelarrayshape[0], pixelarrayshape[1]))\n\n if pixelarray.dtype.name == \"bool\":\n pixelarray = np.cast[\"uint8\"](pixelarray)\n\n if os.path.isfile(outfilename):\n os.remove(outfilename)\n\n if hdr == None: # then a minimal header will be created\n hdu = pyfits.PrimaryHDU(pixelarray.transpose())\n else: # this if else is probably not needed but anyway ...\n hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)\n\n hdu.writeto(outfilename, output_verify='ignore')\n\n if verbose:\n print(\"Wrote %s\" % outfilename)", "def test_addheader(self):\n datasets = [pd.DataFrame(index=range(100),columns=range(54)) for b in range(10)]\n datasetsnew = tutorial_pamap2.addheader(datasets)\n test = datasetsnew[0].shape == datasets[0].shape\n assert test", "def create_output_file(arr):\r\n for i in arr:\r\n output_file.write(f'{i[0]}\\t{i[1]}\\n')", "def _write_header(self, out_handle):\n out_handle.write(\"##gff-version 3\\n\")", "def processHeader(self, header=None, pdata=None):\n\t\tif self.invariantPData.writer and not self.invariantPData.headerOutputted:\n\t\t\tnewHeader = [\"outputID\", 'noOfOutliers', 'noOfNonMissing', 'outlierFraction', 'chiSqStat', 'chiSqMinusLogPvalue',\\\n\t\t\t\t\t\t'xMedianValue', 'yMedianValue', 'corr']\n\t\t\tself.invariantPData.writer.writerow(newHeader)\n\t\t\tself.invariantPData.headerOutputted = True", "def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s", "def genInputFile(self, xdata, ydata):\n\n assert xdata.shape == ydata.shape, 'data shapes not the same'\n\n # build the string to save\n s = ' Interface input file\\n'\n\n for key, value in self.params.get(xdata).items():\n if type(value) is 
dict:\n for k, v in value.items():\n if v is not None:\n s += getParamString(key, k, v)\n\n elif value is not None:\n s += getParamString(key, '', value)\n\n s += \" END\\r\\n\"\n\n # write data\n\n NYstr = \" \" + \"{:6}\".format(\"NY\") + \"{:5}\".format(len(xdata))\n\n s += NYstr + \"\\r\\n\"\n\n data = np.concatenate((xdata, ydata))\n\n for i, c in enumerate(data):\n s += \"{:>11.4E}\".format(c) + '\\r\\n'\n\n with open(self.inputfile, 'w+') as of:\n of.write(s)", "def generate(inputFilename, outputFilename = defaultFileName, \n sizeOfReducedSample = DEFSIZEOFREDUCEDSAMPLE, \n centerEta = DEFCENTERETA, centerPhi = DEFCENTERPHI): \n listOfSignals = convert(inputFilename)\n arrayOfSignals = np.array(listOfSignals)\n arrayOfSignals.shape\n np.save(outputFilename, arrayOfSignals, allow_pickle=False)\n print(\"npy array name: \",outputFilename)", "def createMfile(dHeader):\n\tif specParamsOK(dHeader):\n createMatlabScript(dHeader)\n else:\n raise 'spec params error'", "def numpy_to_h5py(in_dir=config.dir_npy, split = config.split):\n\n in_files=[x[:-13] for x in os.listdir(in_dir) if x.endswith('_voc_stft.npy') and not x.startswith('._')]\n\n random.shuffle(in_files)\n\n\n num_files = len(in_files)\n\n split_idx = int(num_files*split)\n\n trn_files = in_files[:split_idx]\n\n val_files = in_files[split_idx:]\n\n num_val_files = len(val_files)\n\n print('Processing %d training files' % split_idx)\n logger.info('Processing %d training files' % split_idx)\n\n logger.info('Training file: %s' % config.h5py_file_train)\n\n voc_shape_trn = [split_idx, 5170,config.input_features]\n\n mix_shape_trn = [split_idx, 5170,config.input_features]\n\n feats_shape_trn = [split_idx, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_train, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in trn_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] 
= synth_feats\n\n i+=1\n utils.progress(i, split_idx)\n\n logger.info('Processed training file: %s' % f)\n\n hdf5_file.close()\n\n print('Processing %d validation files' % num_val_files)\n logger.info('Processing %d validation files' % num_val_files)\n\n logger.info('Validation file: %s' % config.h5py_file_val)\n\n voc_shape_trn = [num_val_files, 5170,config.input_features]\n\n mix_shape_trn = [num_val_files, 5170,config.input_features]\n\n feats_shape_trn = [num_val_files, 5170,config.output_features]\n\n hdf5_file = h5py.File(config.h5py_file_val, mode='w')\n\n hdf5_file.create_dataset(\"voc_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"mix_stft\", mix_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"back_stft\", voc_shape_trn, np.float32)\n\n hdf5_file.create_dataset(\"feats\", feats_shape_trn, np.float32)\n\n\n i = 0\n\n for f in val_files:\n\n voc_stft = np.load(in_dir+f+'_voc_stft.npy')\n\n voc_stft = voc_stft.astype('float32')\n\n mix_stft = np.load(in_dir+f+'_mix_stft.npy')\n\n mix_stft = mix_stft.astype('float32')\n\n synth_feats = np.load(in_dir+f+'_synth_feats.npy')\n\n synth_feats = synth_feats.astype('float32')\n\n back_stft = np.load(in_dir+f+'_back_stft.npy')\n\n back_stft = back_stft.astype('float32')\n\n hdf5_file[\"voc_stft\"][i,...] = voc_stft\n\n hdf5_file[\"mix_stft\"][i,...] = mix_stft\n\n hdf5_file[\"back_stft\"][i,...] = back_stft\n\n hdf5_file[\"feats\"][i,...] = synth_feats\n\n i+=1\n utils.progress(i, num_val_files)\n\n logger.info('Processed validation file: %s' % f)\n\n hdf5_file.close()\n # return original_ffts", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? 
OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict" ]
[ "0.62532175", "0.6250455", "0.6128656", "0.61236686", "0.61083066", "0.60900545", "0.6076962", "0.60498697", "0.6037466", "0.6030116", "0.5991017", "0.59467375", "0.5944712", "0.59186554", "0.5880443", "0.5877636", "0.587325", "0.5867404", "0.5816105", "0.58092946", "0.58088666", "0.5794874", "0.5794466", "0.5767885", "0.57637346", "0.57587516", "0.57574004", "0.5754054", "0.57498926", "0.56999576", "0.569363", "0.56927115", "0.56839764", "0.56780916", "0.56722987", "0.5670419", "0.5644894", "0.5611841", "0.56088686", "0.5597276", "0.5593906", "0.55911076", "0.5586425", "0.5584152", "0.5576967", "0.55766857", "0.5558082", "0.55575585", "0.5556879", "0.5554183", "0.5539674", "0.55380476", "0.55289483", "0.55204666", "0.5512625", "0.550406", "0.55008644", "0.550067", "0.5499393", "0.54968476", "0.5493453", "0.54922485", "0.5486838", "0.54816914", "0.5478155", "0.54593885", "0.5455127", "0.54489636", "0.5448194", "0.54441077", "0.54400426", "0.54350865", "0.5433064", "0.54239655", "0.5420038", "0.5411443", "0.54100096", "0.5408783", "0.54087114", "0.5389502", "0.5376592", "0.53754795", "0.53731555", "0.53721464", "0.5363307", "0.5357573", "0.53542346", "0.5352155", "0.5351699", "0.5349107", "0.53485465", "0.534398", "0.5338328", "0.5337051", "0.5324889", "0.5320375", "0.5317263", "0.531644", "0.53083754", "0.5300186" ]
0.7628469
0
Convert a tflite model buffer into a Relay module
def convert_to_relay(tflite_model_buf, bind_params_by_name=True): # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1 try: import tflite.Model # pylint: disable=import-outside-toplevel tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite # pylint: disable=import-outside-toplevel tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except ImportError: raise ImportError("The tflite package must be installed") mod, params = relay.frontend.from_tflite(tflite_model) if bind_params_by_name: mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params) return mod, params
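A minimal usage sketch for the document function above (not part of the dataset record): it reads a TFLite flatbuffer from disk and lowers it to a Relay module. The path "model.tflite" is a placeholder, and tvm plus the tflite package are assumed to be installed.

# Hypothetical usage of convert_to_relay; "model.tflite" is a placeholder path.
with open("model.tflite", "rb") as f:
    tflite_model_buf = f.read()

# Returns the Relay IRModule and the extracted parameter dict.
mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name=True)
print(mod["main"])  # inspect the imported Relay function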
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True):\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name)\n\n inputs = dict()\n for param in mod[\"main\"].params:\n name = str(param.name_hint)\n data_shape = [int(i) for i in param.type_annotation.shape]\n dtype = str(param.type_annotation.dtype)\n if np.issubdtype(dtype, np.floating):\n # Since np.random.uniform only allows the ranges of float32,\n # at first float16 is used and scaled afterwards, if necessary.\n in_min, in_max = (np.finfo(\"float16\").min, np.finfo(\"float16\").max)\n data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype)\n scale = np.finfo(dtype).min / np.finfo(\"float16\").min\n data *= scale\n elif np.issubdtype(dtype, np.integer):\n in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype)\n else:\n raise TypeError(f\"Type {dtype} not supported\")\n inputs[name] = data\n\n return mod, inputs, params", "def from_tflite(model, prog_name): #, shape_dict, dtype_dict):\n try:\n import tflite.Model\n import tflite.SubGraph\n import tflite.BuiltinOperator\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n assert isinstance(model, tflite.Model.Model)\n\n # keep the same as tflite\n assert model.SubgraphsLength() == 1, \"only support one subgraph (main subgraph)\"\n subgraph = model.Subgraphs(0)\n\n # model inputs / outputs\n model_inputs = subgraph.InputsAsNumpy()\n model_outputs = subgraph.OutputsAsNumpy()\n assert model_inputs.size == 1, \"Model should have only one input\"\n assert model_outputs.size == 1, \"Model should have only one output\"\n\n # op code in model\n op_converter = OperatorConverter(model, subgraph, prog_name)\n op_converter.is_dequantize = False\n op_converter.check_unsupported_ops()\n\n in_tensor = op_converter.get_tensors(model_inputs)[0]\n out_tensor = op_converter.get_tensors(model_outputs)[0]\n\n op_converter.define_model_sizes(\"IN\", in_tensor)\n op_converter.define_model_sizes(\"OUT\", out_tensor)\n\n op_converter.nn_add_input(in_tensor)\n\n output_nodes = op_converter.convert_op_to_hexagon_nn()\n\n op_converter.nn_add_output(output_nodes)\n\n op_converter.print_nn_nodes()\n\n print(\"tensor_tab:\")\n print(op_converter.tensor_tab)\n\n op_converter.close()\n print(\"Converted Hexagon Model saved to {}\".format(op_converter.h_file.name))", "def convert_from_frozen_graph():\n input_arrays = [\"input\"]\n converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(\n graph_def_file='/media/main/Data/Task/RPiCigDetector/utils/test_model/frozen_inference_graph.pb',\n # both `.pb` and `.pbtxt` files are accepted.\n input_arrays=['input'],\n input_shapes={'input': [1, 224, 224, 3]},\n output_arrays=['MobilenetV1/Predictions/Softmax']\n )\n converter.allow_custom_ops = True\n # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_type = tf.lite.constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {input_arrays[0]: (128, 128)}\n tflite_model = converter.convert()\n\n # Save the model.\n with open('model.tflite', 'wb') as f:\n f.write(tflite_model)", "def convert_to_model(self, *args):", "def decode(self, model: bytes):\n _, path = tempfile.mkstemp()\n with open(path, \"wb\") as fd:\n fd.write(model)\n 
onnx_model = onnx.load(path)\n pytorch_model = ConvertModel(onnx_model)\n os.remove(path)\n return pytorch_model", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def load_vm_flatbuffer(\n vm_flatbuffer: bytes, *, driver: Optional[str] = None, backend: Optional[str] = None\n) -> BoundModule:\n config = _create_config(driver=driver, backend=backend)\n vm_module = _binding.VmModule.copy_buffer(config.vm_instance, vm_flatbuffer)\n return load_vm_module(vm_module, config)", "def model_wrapper(self):\n original = self.args.rnn_type\n if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def tflite_load_model(model_file):\n interpreter = tf.lite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def unpack(self, buff, verbose=0):\n\n\n # See https://docs.python.org/3/library/struct.html#struct.pack\n # for struck pack format\n\n # Local methods to unpack numbers in little-endian format\n idx={'x':0}\n\n def read_uint8():\n idx['x']+=1\n return struct.unpack('<B', buf[idx['x']-1:idx['x']])[0]\n def read_uint32():\n idx['x']+=4\n return struct.unpack('<I', buf[idx['x']-4:idx['x']])[0]\n def read_float32():\n idx['x']+=4\n return struct.unpack('<f', buf[idx['x']-4:idx['x']])[0]\n\n # Return empty model in case the byte-array contains no information\n if len(buf) == 0:\n return None\n\n # Read global stddev and mean (not used in RQRMI version 1.1)\n _=read_float32()\n _=read_float32()\n\n num_of_stages=read_uint32()\n _log(verbose, 'Num of stages: %d' % num_of_stages)\n\n # Preallocate array\n trained_rqrmi=[None for _ in range(num_of_stages)]\n\n for s in range(num_of_stages):\n\n # Read the current stage\n num_of_models=read_uint32()\n\n _log(verbose, '\\nStage %d num of models: %d' % (s, num_of_models))\n\n # Preallocate net_list\n net_list=[None for _ in range(num_of_models)]\n\n for m in range(num_of_models):\n # Read version\n version=read_uint8()\n if version==0:\n _log(verbose, '\\nSkipping model <%d,%d>: model not compiled' % (s, m))\n continue\n elif version!=2:\n _log(verbose, '\\nUnsupported version for model <%d,%d>' % (s, m))\n continue\n\n _log(verbose, '\\nLoading model <%d, %d>: ' % (s,m))\n\n # Read model parameters\n mu=read_float32()\n sig=read_float32()\n fac=read_float32()\n omin=read_float32()\n num_of_layers=read_uint32()\n _log(verbose, 'layers: %d, ' % num_of_layers)\n\n # Preallocate net values\n net_values=[None for _ in range(2*num_of_layers-1)]\n\n # Read network structure\n structure=[None for _ in range(num_of_layers)]\n for l in range(num_of_layers):\n structure[l]=read_uint32()\n\n # Layer 0 bias\n net_values[0]=np.empty(structure[0])\n\n # Preallocate all other layers\n for l in 
range(1, num_of_layers):\n net_values[2*l-1]=np.empty(structure[l]) # Layer bias\n net_values[2*l-0]=np.empty([structure[l-1], structure[l]]) # Layer weights\n\n _log(verbose, 'structure: [%s]' % ','.join([str(x) for x in structure]))\n\n # Read values of first layer\n net_values[0][0]=read_float32()\n _=read_float32() # First layer weight is one (always)\n\n # Read values\n for l in range(1, num_of_layers):\n # Read bias\n for i in range(structure[l]):\n net_values[2*l-1][i]=read_float32()\n # Read weights\n for y in range(structure[l-1]):\n for x in range(structure[l]):\n net_values[2*l][y,x]=read_float32()\n\n # Update stage's net list\n net_list[m]=(mu, sig, fac, omin, net_values)\n\n # Update output with stage\n trained_rqrmi[s] = net_list\n\n # Read the maximum error of each last stage submodel\n self.error_list = []\n for e in range(len(self.trained_rqrmi[-1])):\n self.error_list.append(read_uint32())\n\n _log(verbose, '\\n')\n self.trained_rqrmi = trained_rqrmi", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs))\n _x = self.model.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.id))\n _x = self.model.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs))\n _x = self.model.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model.track.id))\n length = len(self.model.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose:\n _v1 = val1.position\n _x = _v1\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v2 = val1.orientation\n _x = _v2\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v3 = val1.stamp\n _x = _v3\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_projected:\n _v4 = val1.position\n _x = _v4\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v5 = val1.orientation\n _x = _v5\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.pose_resampled:\n _v6 = val1.position\n _x = _v6\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v7 = val1.orientation\n _x = _v7\n 
buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.model.track.pose_flags))\n length = len(self.model.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n _x = self\n buff.write(_struct_3I.pack(_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs))\n _x = self.data.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.id))\n _x = self.data.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs))\n _x = self.data.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data.track.id))\n length = len(self.data.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose:\n _v8 = val1.position\n _x = _v8\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v9 = val1.orientation\n _x = _v9\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v10 = val1.stamp\n _x = _v10\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_projected:\n _v11 = val1.position\n _x = _v11\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v12 = val1.orientation\n _x = _v12\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.pose_resampled:\n _v13 = val1.position\n _x = _v13\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v14 = val1.orientation\n _x = _v14\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.data.track.pose_flags))\n length = len(self.data.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or 
type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n return model", "def vid2tensor( self, current_frame):", "def convert_quantized_tflite_model(frozen_graph_file, tflite_file_path):\n # Convert the model.\n converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(\n graph_def_file=frozen_graph_file,\n input_arrays=[\"normalized_input_image_tensor\"],\n input_shapes={\"normalized_input_image_tensor\": [1, 300, 300, 3]},\n output_arrays=['TFLite_Detection_PostProcess',\n 'TFLite_Detection_PostProcess:1',\n 'TFLite_Detection_PostProcess:2',\n 'TFLite_Detection_PostProcess:3'],\n )\n converter.allow_custom_ops = True\n\n converter.quantized_input_stats = {\"normalized_input_image_tensor\": (0., 1.)}\n # mean, std_dev (input range is [-1, 1])\n converter.inference_type = tf.lite.constants.QUANTIZED_UINT8 # this is the recommended type.\n # converter.inference_input_type = tf.uint8 # optional\n # converter.inference_output_type = tf.uint8 # optional\n tflite_model = converter.convert()\n\n # Save the model.\n with open(tflite_file_path, 'wb') as f:\n f.write(tflite_model)", "def to_payload(self, model):\n return model", "def test_export_pytorch_model(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.header.seq, _x.model_aligned.header.stamp.secs, _x.model_aligned.header.stamp.nsecs))\n _x = self.model_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.id))\n _x = self.model_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.model_aligned.track.header.seq, _x.model_aligned.track.header.stamp.secs, _x.model_aligned.track.header.stamp.nsecs))\n _x = self.model_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.model_aligned.track.id))\n length = len(self.model_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n 
for val1 in self.model_aligned.track.pose:\n _v57 = val1.position\n _x = _v57\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v58 = val1.orientation\n _x = _v58\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v59 = val1.stamp\n _x = _v59\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.model_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_projected:\n _v60 = val1.position\n _x = _v60\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v61 = val1.orientation\n _x = _v61\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.pose_resampled:\n _v62 = val1.position\n _x = _v62\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v63 = val1.orientation\n _x = _v63\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.model_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.model_aligned.track.pose_flags))\n length = len(self.model_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.model_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.header.seq, _x.data_aligned.header.stamp.secs, _x.data_aligned.header.stamp.nsecs))\n _x = self.data_aligned.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.id))\n _x = self.data_aligned.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.params)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.params:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_struct_dB.pack(_x.value, _x.type))\n _x = self\n buff.write(_struct_3I.pack(_x.data_aligned.track.header.seq, _x.data_aligned.track.header.stamp.secs, _x.data_aligned.track.header.stamp.nsecs))\n _x = self.data_aligned.track.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.data_aligned.track.id))\n length = len(self.data_aligned.track.pose)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose:\n _v64 = val1.position\n _x = _v64\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v65 = val1.orientation\n _x = _v65\n 
buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_headers)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_headers:\n buff.write(_struct_I.pack(val1.seq))\n _v66 = val1.stamp\n _x = _v66\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = val1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.data_aligned.track.pose_projected)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_projected:\n _v67 = val1.position\n _x = _v67\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v68 = val1.orientation\n _x = _v68\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_resampled)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.pose_resampled:\n _v69 = val1.position\n _x = _v69\n buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))\n _v70 = val1.orientation\n _x = _v70\n buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.data_aligned.track.pose_flags)\n buff.write(_struct_I.pack(length))\n pattern = '<%sI'%length\n buff.write(struct.pack(pattern, *self.data_aligned.track.pose_flags))\n length = len(self.data_aligned.track.channels)\n buff.write(_struct_I.pack(length))\n for val1 in self.data_aligned.track.channels:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.values)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.values))\n buff.write(_struct_9d.pack(*self.R))\n buff.write(_struct_3d.pack(*self.T))\n _x = self\n buff.write(_struct_df.pack(_x.dist_rot, _x.dist_trans))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n transpose_out_name = node_entry[\"input_names\"][0]\n inter_output_names = [node_entry[\"output_names\"][0]]\n # axis==3 means channel is specified along the 3rd axis\n if attrs[\"axis\"] == 3:\n transpose_out_name = f\"transpose_{node_entry['name']}\"\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n [node_entry[\"input_names\"][0]],\n [transpose_out_name],\n perm=[0, 3, 1, 2],\n )\n model_container.add_nodes([node_transposed])\n inter_output_names = [f\"batch_norm_{node_entry['name']}\"]\n\n input_names = [transpose_out_name] + node_entry[\"input_names\"][1:]\n batch_norm_node = onnx.helper.make_node(\n cls.__name__, input_names, inter_output_names, epsilon=attrs[\"epsilon\"]\n )\n model_container.add_nodes([batch_norm_node])\n\n if attrs[\"axis\"] == 3:\n node_transposed = onnx.helper.make_node(\n Transpose.__name__,\n inter_output_names,\n [node_entry[\"output_names\"][0]],\n perm=[0, 2, 3, 1],\n )\n model_container.add_nodes([node_transposed])", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def convert_model(self, backend, model, weight, **kwargs):\n om_save_path = 
kwargs[\"save_dir\"]\n input_shape = kwargs[\"input_shape\"]\n out_nodes = kwargs[\"out_nodes\"]\n log_save_path = os.path.dirname(model)\n command_line = [\"bash\", self.current_path + \"/model_convert.sh\", backend,\n model, weight, om_save_path, log_save_path, input_shape, out_nodes]\n try:\n subprocess.check_output(command_line)\n except subprocess.CalledProcessError as exc:\n logging.error(\"convert model to bolt failed. The return message is : {}.\".format(exc))", "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def test_forward_mobilenet_v1(accel_type=\"ethos-u55-256\"):\n np.random.seed(23)\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/\"\n \"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n \"mobilenet_v1_1.0_224_quant.tflite\",\n )\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n input_tensor = \"input\"\n input_dtype = \"uint8\"\n input_shape = (1, 224, 224, 3)\n in_min, in_max = util.get_range_for_dtype_str(input_dtype)\n input_data = np.random.randint(in_min, high=in_max, size=input_shape, dtype=input_dtype)\n\n relay_mod, params = convert_to_relay(tflite_model_buf, input_data, \"input\")\n input_data = {input_tensor: input_data}\n output_data = generate_ref_data(relay_mod, input_data)\n\n mod = partition_for_ethosu(relay_mod, params)\n compiled_models = infra.build_source(\n mod, input_data, output_data, accel_type, output_tolerance=10\n )\n infra.verify_source(compiled_models, accel_type)", "def create_model(hparams, mode):\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.name_scope(\"input_pipe\"):\n dataset = create_dataset(hparams, mode)\n iterator = dataset.make_initializable_iterator()\n model = LMandBDRNNModel(hparams=hparams,\n iterator=iterator,\n mode=mode)\n\n sess = tf.Session(graph=graph)\n\n modeltuple = ModelTuple(graph=graph, iterator=iterator,\n model=model, session=sess)\n\n return modeltuple", "def convert_layers(model):\n\n import logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n for name, module in model._modules.items():\n if len(list(module.children())) > 0:\n model._modules[name] = convert_layers(model=module)\n try:\n module_str = str(module)\n module_new = eval(module_str)\n try:\n module_new.weight = module.weight\n module_new.bias = module.bias\n except:\n pass\n model._modules[name] = module_new\n logger.info(\"Quantizing \" + str(name) + \" \" + str(module))\n except:\n pass\n return model", "def preprocess_module(mod):\n\n def alter_conv(attrs, inputs, tinfos, out_type):\n new_attrs = dict(attrs)\n data_info = tinfos[0]\n weight_info = tinfos[1]\n (desired_data_layout, desired_kernel_layout) = (\"NCHW\", \"OIHW\")\n new_attrs[\"data_layout\"] = desired_data_layout\n new_attrs[\"kernel_layout\"] = desired_kernel_layout\n\n if is_depthwise_conv2d(\n data_info.shape,\n attrs[\"data_layout\"],\n weight_info.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n ):\n dkl = desired_kernel_layout\n new_attrs[\"kernel_layout\"] = dkl[1] + dkl[0] + dkl[2] + dkl[3]\n return relay.nn.conv2d(*inputs, **new_attrs)\n\n with OpAttrContext(\"nn.conv2d\", \"FTVMAlterOpLayout\", alter_conv):\n seq = tvm.transform.Sequential(\n [\n transform.ConvertLayout({\"nn.conv2d\": [\"NCHW\", \"OIHW\"]}),\n transform.ConvertLayout({\"nn.conv2d_transpose\": [\"NCHW\", \"OIHW\"]}),\n 
transform.AlterOpLayout(),\n transform.FoldConstant(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3):\n preprocessed_mod = seq(mod)\n return preprocessed_mod", "def _get_model(self, model_path='model.tflite'):\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n return interpreter", "def reconstruct_input_ext(self, model_in):", "def to_local(self, id_tensor):\n ...", "def load_model(model_file):\n # Load TFLite model and allocate tensors.\n interpreter = tflite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def convert_from_onnx_model(\n model,\n output_dir='my-hls-test',\n project_name='myproject',\n input_data_tb=None,\n output_data_tb=None,\n backend='Vivado',\n hls_config=None,\n **kwargs,\n):\n\n config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs)\n\n config['OnnxModel'] = model\n config['InputData'] = input_data_tb\n config['OutputPredictions'] = output_data_tb\n config['HLSConfig'] = {}\n\n if hls_config is None:\n hls_config = {}\n\n model_config = hls_config.get('Model', None)\n config['HLSConfig']['Model'] = _check_model_config(model_config)\n\n _check_hls_config(config, hls_config)\n\n return onnx_to_hls(config)", "def cast_model_to_bf16(\n program, startup_prog=None, amp_lists=None, use_bf16_guard=True\n):\n\n if amp_lists is None:\n amp_lists = AutoMixedPrecisionListsBF16()\n global_block = program.global_block()\n keep_fp32_ops = set()\n to_bf16_var_names = set()\n to_bf16_pre_cast_ops = set()\n origin_ops = []\n for block in program.blocks:\n origin_ops.extend(block.ops)\n\n for block in program.blocks:\n ops = block.ops\n for op in ops:\n if op.type == 'create_py_reader' or op.type == 'read':\n continue\n if _need_keep_fp32(op, amp_lists.unsupported_list, use_bf16_guard):\n keep_fp32_ops.add(op)\n continue # processed below\n for in_name in op.input_names:\n if op.type in {\n 'batch_norm',\n 'fused_bn_add_activation',\n 'layer_norm',\n } and in_name not in {'X', 'Z'}:\n continue\n for in_var_name in op.input(in_name):\n in_var = None\n try:\n in_var = block.var(in_var_name)\n except ValueError as e:\n _logger.debug(\n \"-- {}, try to get it in the global block --\".format(\n e\n )\n )\n in_var = global_block.var(in_var_name)\n if in_var is not None:\n _logger.debug(\n \"-- var {} is got in the global block --\".format(\n in_var_name\n )\n )\n\n if in_var is None or in_var.type not in _valid_types:\n continue\n\n if in_var.dtype == core.VarDesc.VarType.FP32:\n in_var.desc.set_dtype(core.VarDesc.VarType.BF16)\n to_bf16_var_names.add(in_var_name)\n\n _logger.debug(\n \"-- op type: {}, in var name: {}, in var dtype: {} --\".format(\n op.type, in_var_name, in_var.dtype\n )\n )\n\n for out_name in op.output_names:\n if (\n op.type\n in {'batch_norm', 'fused_bn_add_activation', 'layer_norm'}\n and out_name != 'Y'\n ):\n continue\n for out_var_name in op.output(out_name):\n out_var = None\n try:\n out_var = block.var(out_var_name)\n except ValueError as e:\n _logger.debug(\n \"-- {}, try to get it in the global block --\".format(\n e\n )\n )\n out_var = global_block.var(out_var_name)\n if out_var is not None:\n _logger.debug(\n \"-- var {} is got in the global block --\".format(\n out_var_name\n )\n )\n\n if out_var is None or out_var.type not in _valid_types:\n continue\n\n if out_var.dtype == core.VarDesc.VarType.FP32:\n out_var.desc.set_dtype(core.VarDesc.VarType.BF16)\n\n _logger.debug(\n \"-- op type: {}, out var name: {}, out var dtype: 
{} --\".format(\n op.type, out_var_name, out_var.dtype\n )\n )\n for attr_name in ['in_dtype', 'out_dtype', 'dtype']:\n if (\n op.has_attr(attr_name)\n and op.attr(attr_name) == core.VarDesc.VarType.FP32\n ):\n op._set_attr(attr_name, core.VarDesc.VarType.BF16)\n if op.has_attr('use_mkldnn'):\n op._set_attr('use_mkldnn', True)\n if op.has_attr('mkldnn_data_type'):\n op._set_attr('mkldnn_data_type', 'bfloat16')\n\n if startup_prog is not None:\n cast_initializers_to_bf16(\n startup_prog,\n amp_lists,\n global_block,\n ops,\n keep_fp32_ops,\n to_bf16_var_names,\n )\n\n # process ops in keep_fp32_ops\n op_var_rename_map = [\n collections.OrderedDict() for _ in range(len(program.blocks))\n ]\n for block in program.blocks:\n ops = block.ops\n idx = 0\n while idx < len(ops):\n op = ops[idx]\n num_cast_ops = 0\n if op not in keep_fp32_ops:\n if op in to_bf16_pre_cast_ops:\n in_var_cast_num = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n )\n num_cast_ops += in_var_cast_num\n else:\n pre_cast_num = _insert_cast_op(\n block,\n op,\n idx,\n core.VarDesc.VarType.BF16,\n core.VarDesc.VarType.FP32,\n )\n num_cast_ops += pre_cast_num\n for out_var_name in op.output_arg_names:\n out_var = block.vars.get(out_var_name)\n if out_var is None or out_var.type not in _valid_types:\n continue\n if out_var.dtype == core.VarDesc.VarType.BF16:\n out_var.desc.set_dtype(core.VarDesc.VarType.FP32)\n post_ops = find_true_post_op(ops, op, out_var_name)\n for post_op in post_ops:\n if post_op in keep_fp32_ops:\n continue\n post_cast_num = _insert_cast_post_op(\n block,\n op,\n idx + pre_cast_num + 1,\n core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.BF16,\n out_var_name,\n op_var_rename_map,\n )\n num_cast_ops += post_cast_num\n idx += num_cast_ops + 1\n\n _rename_op_input(program, op_var_rename_map, origin_ops, keep_fp32_ops)\n return to_bf16_var_names", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.type))\n _x = self.model\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.head_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.body_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.arm_version\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_2B3i.pack(_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def convert_from_pytorch_model(\n model,\n input_shape,\n output_dir='my-hls-test',\n project_name='myproject',\n 
input_data_tb=None,\n output_data_tb=None,\n backend='Vivado',\n hls_config=None,\n **kwargs,\n):\n\n config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs)\n\n config['PytorchModel'] = model\n config['InputShape'] = input_shape\n config['InputData'] = input_data_tb\n config['OutputPredictions'] = output_data_tb\n config['HLSConfig'] = {}\n\n if hls_config is None:\n hls_config = {}\n\n model_config = hls_config.get('Model', None)\n config['HLSConfig']['Model'] = _check_model_config(model_config)\n\n _check_hls_config(config, hls_config)\n\n return pytorch_to_hls(config)", "def Translate(self):\n mojom_file = self._graph.files[self._file_name]\n\n mod = self._module\n self.PopulateModuleMetadata(mod, mojom_file)\n\n mod.imports = []\n if mojom_file.imports:\n mod.imports = [self.ImportFromMojom(imp) for imp in mojom_file.imports]\n # When translating an imported type, its SourceFileInfo.file_name is a key\n # into self._imports. The value is the module from which the type was\n # imported.\n self._imports = {imp['module'].path: imp for imp in mod.imports}\n\n if mojom_file.declared_mojom_objects:\n if mojom_file.declared_mojom_objects.top_level_constants:\n mod.constants = [\n self.ConstFromMojom(\n self._graph.resolved_values[key].declared_constant, None)\n for key in mojom_file.declared_mojom_objects.top_level_constants]\n\n user_defined_types = ['interfaces', 'structs', 'unions']\n for user_defined_type in user_defined_types:\n if getattr(mojom_file.declared_mojom_objects, user_defined_type):\n setattr(mod, user_defined_type, [self.UserDefinedFromTypeKey(key)\n for key in getattr(\n mojom_file.declared_mojom_objects, user_defined_type)])\n if mojom_file.declared_mojom_objects.top_level_enums:\n mod.enums = [self.UserDefinedFromTypeKey(key)\n for key in mojom_file.declared_mojom_objects.top_level_enums]\n\n return mod", "def load_model(self, model_as_bytes: bytes) -> None:\n\n self.model = deserialize_from_zippy(model_as_bytes)", "def encode(self, model: nn.Module, dummy_input: torch.Tensor):\n _, path = tempfile.mkstemp()\n\n try:\n torch.onnx.export(model, dummy_input, path, verbose=True)\n with open(path, \"rb\") as fd:\n converted_model = fd.read()\n except Exception as e:\n converted_model = None\n print(f\"Error occurred: {e}\\n\")\n finally:\n os.remove(path)\n return converted_model", "def _decode_record(record,name_to_features):\n example = tf.parse_single_example(record,name_to_features)\n\n return example", "def test_from_onnx(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n f = onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)\n f.seek(0)\n\n crypten_model = onnx_converter.from_onnx(f)\n\n self.assertTrue(hasattr(crypten_model, \"encrypt\"))", "def decode_state(buffer: Any) -> np.ndarray:\n state_flat = np.frombuffer(buffer, dtype=np.float32)\n assert state_flat.size == STATE_SIZE\n return state_flat", "def conv2py(self, output_py_model=\"model.py\", predict_function_name=\"gbdt_predict\"):\n self.model.booster().dump_model(self.tmp_file)\n all_nodes_info, total_tree_num = parse_raw_text_model_file(self.tmp_file)\n fw = open(output_py_model, \"w\")\n fw.write('import sys\\nreload(sys)\\nsys.setdefaultencoding(\\'utf8\\')\\n\\n')\n\n def print_one_tree(tree_node_id):\n try:\n tree_id, node_id = tree_node_id.split(\"_\")\n tree_id = int(tree_id)\n node_id = int(node_id)\n except:\n raise ValueError(\"Wrong model file!!\")\n \n def traverse(tree_node_id):\n 
fw.write(\"\\ndef nodeFunc_%s(feature_dict):\\n\"%tree_node_id) \n node_info_dict = all_nodes_info[tree_node_id]\n if \"leaf\" in node_info_dict:\n fw.write(\"\\treturn %f\\n\"%node_info_dict[\"leaf\"])\n else:\n operators = [\"<\", \"<=\", \">\", \">=\", \"==\"]\n condition = node_info_dict[\"condition\"]\n feature_name = \"\"\n optor = \"\"\n value = \"\"\n for operator in operators:\n content = condition.split(operator)\n if len(content) == 2:\n feature_name = content[0]\n opter = operator\n value = content[1]\n break\n if feature_name == \"\":\n raise ValueError(\"Wrong model file!!\")\n \n fw.write(\"\\tif \\\"%s\\\" not in feature_dict or feature_dict[\\\"%s\\\"] is None:\\n\"%(feature_name, feature_name))\n fw.write(\"\\t\\treturn nodeFunc_%s(feature_dict)\\n\"%node_info_dict[\"missing\"])\n fw.write(\"\\tif feature_dict[\\\"%s\\\"] %s %s:\\n\\t\\treturn nodeFunc_%s(feature_dict)\\n\"%(feature_name, opter, value, node_info_dict[\"yes\"]))\n fw.write(\"\\telse:\\n\\t\\treturn nodeFunc_%s(feature_dict)\\n\"%node_info_dict[\"no\"])\n \n traverse(node_info_dict[\"yes\"])\n traverse(node_info_dict[\"no\"])\n \n fw.write(\"\\ndef treeFunc_%d(feature_dict):\\n\"%tree_id)\n fw.write(\"\\treturn nodeFunc_%s(feature_dict)\\n\"%tree_node_id)\n traverse(tree_node_id) \n \n fw.write(\"\\ndef %s(feature_dict):\\n\"%predict_function_name)\n fw.write(\"\\tresult = 0.0\\n\")\n for tree_id in xrange(total_tree_num):\n fw.write(\"\\tresult += treeFunc_%d(feature_dict)\\n\"%tree_id)\n fw.write(\"\\treturn result\\n\")\n \n for tree_id in xrange(total_tree_num):\n #DFS for the gbdt tree \n print_one_tree(\"%d_0\"%tree_id)\n \n fw.close()\n os.popen(\"rm %s\"%self.tmp_file)", "def deserialize(self, str):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = 
_struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v15 = val1.position\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v16 = val1.orientation\n _x = _v16\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v17 = val1.stamp\n _x = _v17\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v18 = val1.position\n _x = _v18\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v19 = val1.orientation\n _x = _v19\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v20 = val1.position\n _x = _v20\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v21 = val1.orientation\n _x = _v21\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 
4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v22 = val1.position\n _x = _v22\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v23 = val1.orientation\n _x = _v23\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v24 = val1.stamp\n _x = _v24\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v25 = val1.position\n _x = _v25\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v26 = val1.orientation\n _x = _v26\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v27 = val1.position\n _x = _v27\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v28 = val1.orientation\n _x = _v28\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = 
end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def instance_to_model(self):\n pass", "def model(flags):\n input_audio = tf.keras.layers.Input(\n shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),\n batch_size=flags.batch_size)\n net = input_audio\n\n if flags.preprocess == 'raw':\n # it is a self contained model, user need to feed raw audio only\n net = speech_features.SpeechFeatures(\n speech_features.SpeechFeatures.get_params(flags))(\n net)\n\n time_size, feature_size = net.shape[1:3]\n\n channels = parse(flags.channels)\n\n net = tf.keras.backend.expand_dims(net)\n\n if flags.debug_2d:\n conv_kernel = first_conv_kernel = (3, 3)\n else:\n net = tf.reshape(\n net, [-1, time_size, 1, feature_size]) # [batch, time, 1, feature]\n first_conv_kernel = (3, 1)\n conv_kernel = parse(flags.kernel_size)\n\n net = tf.keras.layers.Conv2D(\n filters=channels[0],\n kernel_size=first_conv_kernel,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n net = tf.keras.layers.Activation('relu')(net)\n\n if parse(flags.pool_size):\n net = tf.keras.layers.AveragePooling2D(\n pool_size=parse(flags.pool_size), strides=flags.pool_stride)(\n net)\n\n channels = channels[1:]\n\n # residual blocks\n for n in channels:\n if n != net.shape[-1]:\n stride = 2\n layer_in = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=1,\n strides=stride,\n padding='same',\n activation='linear')(\n net)\n layer_in = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n layer_in)\n layer_in = tf.keras.layers.Activation('relu')(layer_in)\n else:\n layer_in = net\n stride = 1\n\n net = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=conv_kernel,\n strides=stride,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n net = tf.keras.layers.Activation('relu')(net)\n\n net = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=conv_kernel,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n\n # residual connection\n net = tf.keras.layers.Add()([net, layer_in])\n net = tf.keras.layers.Activation('relu')(net)\n\n net = tf.keras.layers.AveragePooling2D(\n pool_size=net.shape[1:3], strides=1)(\n net)\n\n net = tf.keras.layers.Dropout(rate=flags.dropout)(net)\n\n # fully connected layer\n net = tf.keras.layers.Conv2D(\n filters=flags.label_count,\n kernel_size=1,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n\n net = tf.reshape(net, shape=(-1, net.shape[3]))\n return tf.keras.Model(input_audio, net)", "def model(inputs, is_training):\n\n tf.logging.info(FLAGS.model_structure)\n tf.logging.info(FLAGS.model_edge_weights)\n structure = json.loads(FLAGS.model_structure)\n\n if FLAGS.use_object_input:\n feature_shape = inputs[0].shape\n original_inputs = 
inputs[0]\n object_inputs = inputs[1]\n else:\n feature_shape = inputs.shape\n original_inputs = inputs\n object_inputs = None\n\n batch_size = feature_shape[0] // FLAGS.num_frames\n original_num_frames = FLAGS.num_frames\n num_frames = original_num_frames\n\n grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}\n for i in range(len(structure)):\n grouping[structure[i][0]].append(i)\n\n stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])\n\n assert stem_count != 0\n stem_filters = 128 // stem_count\n\n if grouping[-2]:\n # Instead of loading optical flows as inputs from data pipeline, we are\n # applying the \"Representation Flow\" to RGB frames so that we can compute\n # the flow within TPU/GPU on fly. It's essentially optical flow since we\n # do it with RGBs.\n flow_inputs = rf.rep_flow(\n original_inputs,\n batch_size,\n original_num_frames,\n num_iter=40,\n is_training=is_training,\n bottleneck=1,\n scope='rep_flow')\n streams = []\n\n for i in range(len(structure)):\n with tf.variable_scope('Node_' + str(i)):\n if structure[i][0] == -1:\n inputs = asn.rgb_conv_stem(original_inputs,\n original_num_frames,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -2:\n inputs = asn.flow_conv_stem(flow_inputs,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -3:\n # In order to use the object inputs, you need to feed your object\n # input tensor here.\n inputs = object_conv_stem(object_inputs,\n data_format)\n streams.append(inputs)\n else:\n block_number = structure[i][0]\n\n combined_inputs = [streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))]\n\n tf.logging.info(grouping)\n nodes_below = []\n for k in range(-3, structure[i][0]):\n nodes_below = nodes_below + grouping[k]\n\n peers = []\n if FLAGS.attention_mode:\n lg_channel = -1\n tf.logging.info(nodes_below)\n for k in nodes_below:\n tf.logging.info(streams[k].shape)\n lg_channel = max(streams[k].shape[3], lg_channel)\n\n for node_index in nodes_below:\n attn = tf.reduce_mean(streams[node_index], [1, 2])\n\n attn = tf.layers.dense(\n inputs=attn,\n units=lg_channel,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n peers.append(attn)\n\n combined_inputs = fusion_with_peer_attention(\n combined_inputs,\n index=i,\n attention_mode=FLAGS.attention_mode,\n attention_in=peers,\n use_5d_mode=False,\n data_format=data_format)\n\n graph = asn.block_group(\n inputs=combined_inputs,\n filters=structure[i][2],\n block_fn=block_fn,\n blocks=layers[block_number],\n strides=structure[i][4],\n is_training=is_training,\n name='block_group' + str(i),\n block_level=structure[i][0],\n num_frames=num_frames,\n temporal_dilation=structure[i][3],\n data_format=data_format)\n\n streams.append(graph)\n\n outputs = asn.multi_stream_heads(streams,\n grouping[3],\n original_num_frames,\n num_classes,\n data_format)\n\n return outputs", "def model_to_instance(model):\n pass", "def _tobuffer(self, object_):\n\n raise NotImplementedError", "def __pull_model(self):\n\n model = ArmModeler().get(self.name)\n\n if model:\n logger.debug(\"model creating...\")\n self.alpha = model[\"alpha\"]\n self.a = model[\"a\"]\n self.q = model[\"q\"]\n self.d = model[\"d\"]\n self.dh_params = model[\"dh_params\"]\n self.tf_matrices_list = model[\"transform_matrices\"]\n self.jacobian_matrix = model[\"jacobian_matrix\"]\n\n else:\n ArmModeler().create(self.name)\n self.__pull_model()", "def 
load_model(self) -> Any:", "def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod", "def list_to_backend_type(data: List) -> TTensor:", "def test_model_to_model(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")", "def convert_keras_to_tflite(model,\n output_path,\n custom_objects=None,\n is_quantized=True,\n inference_type=None,\n inference_input_type=None,\n input_quant_params=(-128., 255.)):\n if custom_objects is None:\n custom_objects = {}\n\n if not compat.is_v1_apis():\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n else:\n _, keras_file = tempfile.mkstemp(\".h5\")\n tf.keras.models.save_model(model, keras_file)\n converter = tf.lite.TFLiteConverter.from_keras_model_file(\n keras_file, custom_objects=custom_objects)\n\n if is_quantized:\n if not compat.is_v1_apis():\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n else:\n converter.inference_type = tf.lite.constants.INT8\n converter.inference_input_type = tf.lite.constants.FLOAT\n # TODO(tfmot): should be able to make everything use the\n # same inference_type in TF 1.X tests.\n if inference_type:\n converter.inference_type = inference_type\n if inference_input_type:\n converter.inference_input_type = inference_input_type\n\n input_arrays = converter.get_input_arrays()\n converter.quantized_input_stats = {\n input_arrays[0]: input_quant_params\n } # mean, std_dev values for float to quantized int8 values.\n\n tflite_model = converter.convert()\n\n if output_path is not None:\n with open(output_path, \"wb\") as f:\n f.write(tflite_model)\n\n return tflite_model", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example[\"src_ids\"].values, example[\"tgt_ids\"].values, example[\"label\"][0]", "def convert_to_torch_script(model, input_size):\n model.eval()\n\n # An example input you would normally provide to your model's forward() method.\n example = torch.rand(1, 3, input_size, input_size)\n\n # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\n traced_script_module = torch.jit.trace(model, example)\n\n return traced_script_module", "def _to_storage_model(self, store, result):\n file_dict = result.as_dict()\n file_dict.pop('object_type')\n file_dict['store'] = store\n return StorageModel(**file_dict)", "def render_switch_model():\n try:\n global model, model_dir_dict\n model_type = get_parameter(\"model\")\n model_file = get_parameter(\"file\", default=None)\n # if model is not None and (model[\"type\"] != model_type or model[\"file\"] != model_file):\n # # switch model\n # model[\"model\"].close()\n # del model\n # model = None\n # renju.logger.info(\"close old model, type=%s, file=%s\" % (model[\"type\"], model[\"file\"]))\n\n model_dir = model_dir_dict[model_type]\n # find avaliable model file\n if model_file is None:\n checkpoint = 
tf.train.get_checkpoint_state(model_dir)\n if not (checkpoint and checkpoint.model_checkpoint_path):\n renju.logger.warn(\"switch model error, not found avaliable model file\")\n return response(status=2)\n model_file = checkpoint.model_checkpoint_path\n model_file = model_file[model_file.rfind(\"/\") + 1:]\n # check model type\n if model[\"type\"] != model_type:\n renju.logger.warn(\"switch model error, model type not equal, (%s, %s)\" % (model_type, model[\"type\"]))\n return response(status=2)\n if model is None:\n model = {\"model\": renju.load_model(args, model_type, model_file),\n \"type\": model_type,\n \"dir\": model_dir + \"/\",\n \"file\": model_file}\n else:\n if model_file != model[\"file\"]:\n model[\"model\"].saver.restore(model[\"model\"].session, model[\"dir\"] + model_file)\n renju.logger.info(\"successful load model file: %s\" % model_file)\n return response(status=0)\n except:\n renju.logger.warn(\"switch model error, detail=%s\" % traceback.format_exc())\n return response(status=2)", "def _decode_record(record):\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"stroke_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"lmask\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n \"label_ids\": tf.FixedLenFeature([self.max_sen_len], tf.int64),\n }\n\n\n example = tf.parse_single_example(record, name_to_features)\n\n #int64 to int32\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n input_ids = example['input_ids']\n input_mask = example['input_mask']\n segment_ids = example['segment_ids']\n stroke_ids = example['stroke_ids']\n label_ids = example['label_ids']\n lmask = example['lmask']\n py_labels = tf.py_func(_get_py_seq, [label_ids], [tf.int32])\n\n return input_ids, input_mask, segment_ids, stroke_ids, lmask, label_ids, py_labels", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n return example", "def obj2tensor(pyobj, device='cuda'):\n storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))\n return torch.ByteTensor(storage).to(device=device)", "def PyTorchWrapper_v3(\n pytorch_model: \"torch.nn.Module\",\n convert_inputs: Optional[Callable] = None,\n convert_outputs: Optional[Callable] = None,\n mixed_precision: bool = False,\n grad_scaler: Optional[PyTorchGradScaler] = None,\n device: Optional[\"torch.device\"] = None,\n serialize_model: Optional[Callable[[Any], bytes]] = None,\n deserialize_model: Optional[Callable[[Any, bytes, \"torch.device\"], Any]] = None,\n) -> Model[Any, Any]:\n if convert_inputs is None:\n convert_inputs = convert_pytorch_default_inputs\n if convert_outputs is None:\n convert_outputs = convert_pytorch_default_outputs\n return Model(\n \"pytorch\",\n forward,\n attrs={\"convert_inputs\": convert_inputs, \"convert_outputs\": convert_outputs},\n shims=[\n PyTorchShim(\n pytorch_model,\n mixed_precision=mixed_precision,\n grad_scaler=grad_scaler,\n device=device,\n serialize_model=serialize_model,\n deserialize_model=deserialize_model,\n )\n ],\n dims={\"nI\": None, \"nO\": None},\n )", "def make_stax_model(self):", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 
to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def convert_mv(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Vec\")[0])\n y = _op.expand_dims(y, axis=-1)\n y = _op.transpose(y)\n out = _op.nn.dense(x, y)\n out = _op.squeeze(out, axis=[-1])\n g.add_node(op.output(\"Out\")[0], out)", "def samples_to_buffer(self, samples):\n return ModelSamplesToBuffer(\n observation=samples.env.observation,\n action=samples.agent.action,\n reward=samples.env.reward,\n done=samples.env.done,\n value=samples.agent.agent_info.p,\n )", "def export(self,\n output_dir: Text,\n tflite_path: Text = None,\n tensorrt: Text = None):\n signitures = self.signitures\n signature_def_map = {\n 'serving_default':\n tf.saved_model.predict_signature_def(\n {signitures['image_arrays'].name: signitures['image_arrays']},\n {signitures['prediction'].name: signitures['prediction']}),\n }\n b = tf.saved_model.Builder(output_dir)\n b.add_meta_graph_and_variables(\n self.sess,\n tags=['serve'],\n signature_def_map=signature_def_map,\n assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),\n clear_devices=True)\n b.save()\n logging.info('Model saved at %s', output_dir)\n\n # also save freeze pb file.\n graphdef = self.freeze()\n pb_path = os.path.join(output_dir, self.model_name + '_frozen.pb')\n tf.io.gfile.GFile(pb_path, 'wb').write(graphdef.SerializeToString())\n logging.info('Frozen graph saved at %s', pb_path)\n\n if tflite_path:\n height, width = utils.parse_image_size(self.params['image_size'])\n input_name = signitures['image_arrays'].op.name\n input_shapes = {input_name: [None, height, width, 3]}\n converter = tf.lite.TFLiteConverter.from_saved_model(\n output_dir,\n input_arrays=[input_name],\n input_shapes=input_shapes,\n output_arrays=[signitures['prediction'].op.name])\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]\n tflite_model = converter.convert()\n\n tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)\n logging.info('TFLite is saved at %s', tflite_path)\n\n if tensorrt:\n from tensorflow.python.compiler.tensorrt import trt # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())\n trt.create_inference_graph(\n None,\n None,\n precision_mode=tensorrt,\n input_saved_model_dir=output_dir,\n output_saved_model_dir=trt_path,\n session_config=sess_config)\n logging.info('TensorRT model is saved at %s', trt_path)", "def unbufferize(worker: AbstractWorker, protobuf_plan: PlanPB) -> \"Plan\":\n id_ = sy.serde.protobuf.proto.get_protobuf_id(protobuf_plan.id)\n\n role = sy.serde.protobuf.serde._unbufferize(worker, protobuf_plan.role)\n\n name = protobuf_plan.name\n tags = set(protobuf_plan.tags) if protobuf_plan.tags else None\n description = protobuf_plan.description if protobuf_plan.description else None\n input_types = sy.serde.protobuf.serde._unbufferize(worker, protobuf_plan.input_types)\n\n plan = Plan(\n role=role,\n include_state=protobuf_plan.include_state,\n is_built=True,\n id=id_,\n owner=worker,\n name=name,\n tags=tags,\n description=description,\n input_types=input_types,\n )\n\n if protobuf_plan.torchscript:\n torchscript = io.BytesIO(protobuf_plan.torchscript)\n plan.torchscript = torch.jit.load(torchscript)\n\n return plan", "def _decode_record(record, 
name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(self, record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n # tf.logging.info(t)\n # t = tf.sparse.to_dense(t)\n # tf.logging.info(t.get_shape().as_list())\n # assert t.get_shape().as_list()[0] is not None\n example[name] = t\n \n del example[\"source_sos_ids\"]\n del example[\"source_sos_mask\"]\n\n return example", "def to_oskar_telescope_model(self, filename):\n pass", "def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n\n if args.character_embeddings:\n embed_tokens = CharacterTokenEmbedder(\n task.source_dictionary,\n eval(args.character_filters),\n args.character_embedding_dim,\n args.decoder_embed_dim,\n args.char_embedder_highway_layers,\n )\n elif args.adaptive_input:\n embed_tokens = AdaptiveInput(\n len(task.source_dictionary),\n task.source_dictionary.pad(),\n args.decoder_input_dim,\n args.adaptive_input_factor,\n args.decoder_embed_dim,\n options.eval_str_list(args.adaptive_input_cutoff, type=int),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n embed_tokens = cls.build_embedding(\n args, task.source_dictionary, args.decoder_input_dim\n )\n\n if args.tie_adaptive_weights:\n assert args.adaptive_input\n assert args.adaptive_input_factor == args.adaptive_softmax_factor\n assert (\n args.adaptive_softmax_cutoff == args.adaptive_input_cutoff\n ), \"{} != {}\".format(\n args.adaptive_softmax_cutoff, args.adaptive_input_cutoff\n )\n assert args.decoder_input_dim == args.decoder_output_dim\n\n decoder = LinearTransformerDecoder(\n args, task.target_dictionary, embed_tokens, no_encoder_attn=True\n )\n return cls(decoder)", "def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction", "def 
assemblenet_plus_generator(block_fn,\n layers,\n num_classes,\n data_format='channels_last'):\n\n def model(inputs, is_training):\n \"\"\"Creation of the model graph.\"\"\"\n\n tf.logging.info(FLAGS.model_structure)\n tf.logging.info(FLAGS.model_edge_weights)\n structure = json.loads(FLAGS.model_structure)\n\n if FLAGS.use_object_input:\n feature_shape = inputs[0].shape\n original_inputs = inputs[0]\n object_inputs = inputs[1]\n else:\n feature_shape = inputs.shape\n original_inputs = inputs\n object_inputs = None\n\n batch_size = feature_shape[0] // FLAGS.num_frames\n original_num_frames = FLAGS.num_frames\n num_frames = original_num_frames\n\n grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}\n for i in range(len(structure)):\n grouping[structure[i][0]].append(i)\n\n stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])\n\n assert stem_count != 0\n stem_filters = 128 // stem_count\n\n if grouping[-2]:\n # Instead of loading optical flows as inputs from data pipeline, we are\n # applying the \"Representation Flow\" to RGB frames so that we can compute\n # the flow within TPU/GPU on fly. It's essentially optical flow since we\n # do it with RGBs.\n flow_inputs = rf.rep_flow(\n original_inputs,\n batch_size,\n original_num_frames,\n num_iter=40,\n is_training=is_training,\n bottleneck=1,\n scope='rep_flow')\n streams = []\n\n for i in range(len(structure)):\n with tf.variable_scope('Node_' + str(i)):\n if structure[i][0] == -1:\n inputs = asn.rgb_conv_stem(original_inputs,\n original_num_frames,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -2:\n inputs = asn.flow_conv_stem(flow_inputs,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -3:\n # In order to use the object inputs, you need to feed your object\n # input tensor here.\n inputs = object_conv_stem(object_inputs,\n data_format)\n streams.append(inputs)\n else:\n block_number = structure[i][0]\n\n combined_inputs = [streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))]\n\n tf.logging.info(grouping)\n nodes_below = []\n for k in range(-3, structure[i][0]):\n nodes_below = nodes_below + grouping[k]\n\n peers = []\n if FLAGS.attention_mode:\n lg_channel = -1\n tf.logging.info(nodes_below)\n for k in nodes_below:\n tf.logging.info(streams[k].shape)\n lg_channel = max(streams[k].shape[3], lg_channel)\n\n for node_index in nodes_below:\n attn = tf.reduce_mean(streams[node_index], [1, 2])\n\n attn = tf.layers.dense(\n inputs=attn,\n units=lg_channel,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n peers.append(attn)\n\n combined_inputs = fusion_with_peer_attention(\n combined_inputs,\n index=i,\n attention_mode=FLAGS.attention_mode,\n attention_in=peers,\n use_5d_mode=False,\n data_format=data_format)\n\n graph = asn.block_group(\n inputs=combined_inputs,\n filters=structure[i][2],\n block_fn=block_fn,\n blocks=layers[block_number],\n strides=structure[i][4],\n is_training=is_training,\n name='block_group' + str(i),\n block_level=structure[i][0],\n num_frames=num_frames,\n temporal_dilation=structure[i][3],\n data_format=data_format)\n\n streams.append(graph)\n\n outputs = asn.multi_stream_heads(streams,\n grouping[3],\n original_num_frames,\n num_classes,\n data_format)\n\n return outputs\n\n model.default_image_size = 224\n return model", "def load_model(model, model_index, device=\"cpu\"):\n with 
open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model", "def to_tfrecord(data_blob):\n\n id = np.array(data_blob['id'], dtype=np.int32).tobytes()\n dim = np.array(data_blob['images'].shape, dtype=np.int32).tobytes()\n\n images = np.array(data_blob['images'], dtype=np.uint8).tobytes()\n poses = np.array(data_blob['poses'], dtype=np.float32).tobytes()\n depth = np.array(data_blob['depth'], dtype=np.float32).tobytes()\n filled = np.array(data_blob['filled'], dtype=np.float32).tobytes()\n intrinsics = np.array(data_blob['intrinsics'], dtype=np.float32).tobytes()\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[id])),\n 'dim': tf.train.Feature(bytes_list=tf.train.BytesList(value=[dim])),\n 'images': tf.train.Feature(bytes_list=tf.train.BytesList(value=[images])),\n 'poses': tf.train.Feature(bytes_list=tf.train.BytesList(value=[poses])),\n 'depth': tf.train.Feature(bytes_list=tf.train.BytesList(value=[depth])),\n 'filled': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filled])),\n 'intrinsics': tf.train.Feature(bytes_list=tf.train.BytesList(value=[intrinsics])),\n }))\n\n return example", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def __init__(self, model_name: str, label_file: str) -> None:\n\n # Append TFLITE extension to model_name if there's no extension\n _, ext = os.path.splitext(model_name)\n if not ext:\n model_name += '.tflite'\n\n # Initialize the TFLite model.\n interpreter = Interpreter(model_path=model_name, num_threads=4)\n interpreter.allocate_tensors()\n\n self._input_index = interpreter.get_input_details()[0]['index']\n self._output_index = interpreter.get_output_details()[0]['index']\n self._interpreter = interpreter\n\n self.pose_class_names = self._load_labels(label_file)", "def decode():\n\n with tf.device('/cpu:0'):\n dataset_test = SequenceDataset(\n subset=\"test\",\n config_dir=FLAGS.config_dir,\n data_dir=FLAGS.data_dir,\n batch_size=1,\n input_size=FLAGS.input_dim,\n output_size=FLAGS.output_dim,\n infer=True,\n name=\"dataset_test\")()\n\n model = TfModel(\n rnn_cell=FLAGS.rnn_cell,\n dnn_depth=FLAGS.dnn_depth,\n dnn_num_hidden=FLAGS.dnn_num_hidden,\n rnn_depth=FLAGS.rnn_depth,\n rnn_num_hidden=FLAGS.rnn_num_hidden,\n output_size=FLAGS.output_dim,\n bidirectional=FLAGS.bidirectional,\n rnn_output=FLAGS.rnn_output,\n cnn_output=FLAGS.cnn_output,\n look_ahead=FLAGS.look_ahead,\n mdn_output=FLAGS.mdn_output,\n mix_num=FLAGS.mix_num,\n name=\"tf_model\")\n\n # Build the testing model and get test output sequence.\n test_iterator = dataset_test.batched_dataset.make_one_shot_iterator()\n input_sequence, input_sequence_length = test_iterator.get_next()\n test_output_sequence_logits, test_final_state = model(\n input_sequence, input_sequence_length)\n\n show_all_variables()\n\n saver = tf.train.Saver()\n\n # Decode.\n with tf.Session() as sess:\n # Run init\n sess.run(tf.global_variables_initializer())\n\n if not restore_from_ckpt(sess, saver): sys.exit(-1)\n\n # Read cmvn to do reverse mean variance normalization\n cmvn = np.load(os.path.join(FLAGS.data_dir, \"train_cmvn.npz\"))\n\n 
num_batches = 0\n used_time_sum = frames_sum = 0.0\n while True:\n try:\n time_start = time.time()\n logits = sess.run(test_output_sequence_logits)\n time_end = time.time()\n\n used_time = time_end - time_start\n used_time_sum += used_time\n frame_num = logits.shape[1]\n frames_sum += frame_num\n\n # Squeeze batch dimension.\n logits = logits.squeeze(axis=0)\n\n if FLAGS.mdn_output:\n out_pi = logits[:, : FLAGS.mix_num]\n out_mu = logits[:, FLAGS.mix_num : (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim)]\n out_sigma = logits[:, (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim) :]\n\n max_index_pi = out_pi.argmax(axis=1)\n result_mu = []\n for i in xrange(out_mu.shape[0]):\n beg_index = max_index_pi[i] * FLAGS.output_dim\n end_index = (max_index_pi[i] + 1) * FLAGS.output_dim\n result_mu.append(out_mu[i, beg_index:end_index])\n logits = np.vstack(result_mu)\n\n sequence = logits * cmvn[\"stddev_labels\"] + cmvn[\"mean_labels\"]\n\n out_dir_name = os.path.join(FLAGS.save_dir, \"test\", \"cmp\")\n out_file_name =os.path.basename(\n dataset_test.tfrecords_lst[num_batches]).split('.')[0] + \".cmp\"\n out_path = os.path.join(out_dir_name, out_file_name)\n write_binary_file(sequence, out_path, with_dim=False)\n #np.savetxt(out_path, sequence, fmt=\"%f\")\n\n tf.logging.info(\n \"writing inferred cmp to %s (%d frames in %.4f seconds)\" % (\n out_path, frame_num, used_time))\n num_batches += 1\n except tf.errors.OutOfRangeError:\n break\n\n tf.logging.info(\"Done decoding -- epoch limit reached (%d \"\n \"frames per second)\" % int(frames_sum / used_time_sum))", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def _decode_record(record, name_to_features):\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example", "def from_tensorflow(self, graph, layout=\"NHWC\", shape=None, outputs=None):\n func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)\n self._mod[\"main\"] = func\n return self._mod, self._params", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 1\n (self.type,) = _struct_B.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model = str[start:end].decode('utf-8')\n else:\n 
self.model = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.head_version = str[start:end].decode('utf-8')\n else:\n self.head_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.body_version = str[start:end].decode('utf-8')\n else:\n self.body_version = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.arm_version = str[start:end].decode('utf-8')\n else:\n self.arm_version = str[start:end]\n _x = self\n start = end\n end += 14\n (_x.has_laser, _x.has_extended_arms, _x.number_of_legs, _x.number_of_arms, _x.number_of_hands,) = _struct_2B3i.unpack(str[start:end])\n self.has_laser = bool(self.has_laser)\n self.has_extended_arms = bool(self.has_extended_arms)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])", "def translate_frames(model, frames):\n frames_v = autograd.Variable(torch.FloatTensor(frames).cuda())\n out_frames = model(frames_v)\n return out_frames.cpu().numpy()", "def build_model():", "def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n if is_deepspeed_available():\n options += (DeepSpeedEngine,)\n\n while isinstance(model, options):\n model = model.module\n\n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n original_forward = model.__dict__.pop(\"_original_forward\", None)\n if original_forward is not None:\n while hasattr(forward, \"__wrapped__\"):\n forward = forward.__wrapped__\n if forward == original_forward:\n break\n model.forward = forward\n if getattr(model, \"_converted_to_transformer_engine\", False):\n convert_model(model, to_transformer_engine=False)\n return model", "def convert_to_tf_record(_):\n\n mnist = input_data.read_data_sets(\n \"/tmp/tensorflow/mnist/input_data\",\n reshape=False\n )\n\n convert_to(mnist.validation, 'validation', FLAGS.data_directory)\n convert_to(mnist.train, 'train', FLAGS.data_directory, num_shards=10)\n convert_to(mnist.test, 'test', FLAGS.data_directory)", "def __init__(self, saved_model=None, serialize_input=True):\n assert saved_model\n self.saved_model_path = saved_model\n self.serialize_input = serialize_input\n logging.info(\"Reading checkpoint {}.\".format(saved_model))\n imported_model = tf.saved_model.load(saved_model)\n self.bleurt_model_ops = imported_model.signatures[\"serving_default\"]\n logging.info(\"BLEURT initialized.\")", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get 
config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def dump_model(self):", "def backend_specific_model(model: TModel, tmp_dir: str):", "def _convert_to_example(image_buffer, label, height, width):\n\n channels = 3\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'depth': _int64_feature(channels),\n 'label': _int64_feature(label),\n 'image': _bytes_feature(image_buffer.tostring())}))\n return example", "def _decode_record(record, name_to_features):\n example = tf.io.parse_single_example(serialized=record, features=name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, dtype=tf.int32)\n example[name] = t\n\n return example", "def to_raw_tx(self):\n return b2x(self.tx.serialize())", "def _decode_record(record, name_to_features):\n\t\t\texample = tf.parse_single_example(record, name_to_features)\n\n\t\t\t# tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n\t\t\t# So cast all int64 to int32.\n\t\t\tfor name in list(example.keys()):\n\t\t\t\tt = example[name]\n\t\t\t\tif t.dtype == tf.int64:\n\t\t\t\t\tt = tf.to_int32(t)\n\t\t\t\texample[name] = t\n\n\t\t\treturn example", "def model(self) -> Type[Model]:" ]
[ "0.6981948", "0.6846798", "0.6283583", "0.61803657", "0.5930052", "0.5720828", "0.5706919", "0.56350565", "0.5599901", "0.5577139", "0.55073345", "0.55071956", "0.5415228", "0.53956825", "0.53790677", "0.5360498", "0.5250914", "0.52445364", "0.5237697", "0.52300274", "0.52069974", "0.5206539", "0.51938456", "0.5193306", "0.5192036", "0.5169634", "0.51580673", "0.51526356", "0.51375127", "0.5095351", "0.50871783", "0.50740665", "0.50633276", "0.50561893", "0.5041783", "0.5025723", "0.502168", "0.5016881", "0.5016687", "0.50138927", "0.50120157", "0.5008365", "0.5007873", "0.50009257", "0.49908", "0.4989156", "0.4986876", "0.49851608", "0.4981303", "0.49762675", "0.4968537", "0.49683928", "0.49632818", "0.49593914", "0.49471152", "0.49453023", "0.49284622", "0.49218518", "0.49172986", "0.49153858", "0.49014932", "0.48952913", "0.48890775", "0.48849857", "0.48823428", "0.48757857", "0.48755497", "0.48753807", "0.48705703", "0.4869357", "0.48662513", "0.4861165", "0.484931", "0.48463133", "0.4844941", "0.48442042", "0.48419932", "0.48355207", "0.48325467", "0.48309866", "0.48307186", "0.4829653", "0.4829653", "0.4829653", "0.48292747", "0.48210022", "0.48158386", "0.48100388", "0.48099363", "0.48071668", "0.48065197", "0.48031917", "0.48000434", "0.47964194", "0.4791977", "0.47918805", "0.47881207", "0.4784061", "0.47816414", "0.47770634" ]
0.7927206
0
This method generates runtime.Modules for the tests
def compile_models(
    models: Union[List[AOTTestModel], AOTTestModel],
    interface_api: str,
    use_unpacked_api: bool,
    workspace_byte_alignment: int = 8,
    constant_byte_alignment: int = 8,
    enable_op_fusion: bool = True,
    pass_config: Dict[str, Any] = None,
    use_runtime_executor: bool = True,
    target: tvm.target.Target = tvm.target.Target("c"),
    workspace_memory_pools=None,
    constant_memory_pools=None,
    schedule_name: str = None,
) -> List[AOTCompiledTestModel]:

    if not isinstance(models, list):
        models = [models]

    runtime = Runtime("crt")
    executor = Executor(
        "aot",
        {
            "workspace-byte-alignment": workspace_byte_alignment,
            "constant-byte-alignment": constant_byte_alignment,
            "interface-api": interface_api,
            "unpacked-api": use_unpacked_api,
        },
    )

    config = {"tir.disable_vectorize": True}
    if pass_config:
        config = {**config, **pass_config}
    if not enable_op_fusion:
        config["relay.FuseOps.max_depth"] = 1

    compiled_mods = list()
    for model in models:
        with contextlib.ExitStack() as context_stack:
            if schedule_name:
                # Testing with deterministic schedule
                task_list = autotvm.task.extract_from_program(
                    model.module, target=target, params=model.params
                )
                context_stack.enter_context(
                    tvm.autotvm.apply_fixed_config(task_list, schedule_name)
                )

            context_stack.enter_context(tvm.transform.PassContext(opt_level=3, config=config))

            build_kwargs = dict(
                ir_mod=model.module,
                params=model.params,
                mod_name=model.name,
            )

            # TODO(Mousius) - Remove once executor/runtime are fully removed from Target
            if use_runtime_executor:
                build_kwargs.update(
                    dict(
                        target=target,
                        executor=executor,
                        runtime=runtime,
                        workspace_memory_pools=workspace_memory_pools,
                        constant_memory_pools=constant_memory_pools,
                    )
                )
            else:
                build_kwargs.update(dict(target=tvm.target.Target(target, host=target)))

            executor_factory = tvm.relay.build(**build_kwargs)
            compiled_mods.append(
                AOTCompiledTestModel(model=model, executor_factory=executor_factory)
            )
    return compiled_mods
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_module(self):\n pass", "def make_modules(self, config):\n pass", "def modules():", "def test():\n return _make_modules(is_train=False)", "def setUp(self):\n self.modules = {}", "def test_generate_all_testing(self):\n pass", "def test_create_modules_5(self):\n basedir = testdata.create_dir()\n prefix = \"testdata_cm4\"\n r = testdata.create_modules({\n \"foo\": [\n \"class Foo(object): pass\",\n ],\n \"bar\": [\n \"class Bar(object): pass\",\n ],\n }, tmpdir=basedir, modpath=prefix)\n\n p = os.path.join(basedir, prefix)\n self.assertTrue(os.path.isdir(p))\n\n p2 = os.path.join(p, prefix)\n self.assertFalse(os.path.isdir(p2))", "def create_modules(*names):\n source = 'attr = {0!r}'\n created_paths = []\n mapping = {}\n state_manager = None\n uncache_manager = None\n try:\n temp_dir = tempfile.mkdtemp()\n mapping['.root'] = temp_dir\n import_names = set()\n for name in names:\n if not name.endswith('__init__'):\n import_name = name\n else:\n import_name = name[:-len('.__init__')]\n import_names.add(import_name)\n if import_name in sys.modules:\n del sys.modules[import_name]\n name_parts = name.split('.')\n file_path = temp_dir\n for directory in name_parts[:-1]:\n file_path = os.path.join(file_path, directory)\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n created_paths.append(file_path)\n file_path = os.path.join(file_path, name_parts[-1] + '.py')\n with open(file_path, 'w') as file:\n file.write(source.format(name))\n created_paths.append(file_path)\n mapping[name] = file_path\n uncache_manager = util.uncache(*import_names)\n uncache_manager.__enter__()\n state_manager = util.import_state(path=[temp_dir])\n state_manager.__enter__()\n yield mapping\n finally:\n if state_manager is not None:\n state_manager.__exit__(None, None, None)\n if uncache_manager is not None:\n uncache_manager.__exit__(None, None, None)\n support.rmtree(temp_dir)", "def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")", "def generate_basic_modules(template_dir=TEMPLATE_DIR, out_dir=PKG_DIR):\n print(80 * \"-\")\n print(\"Package:\", out_dir)\n\n basic_modules = [\"_init.py\",\n \"constants.py\",\n \"base_api.py\",\n \"exception.py\"]\n\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n installed = []\n for module in basic_modules:\n in_file = os.path.join(template_dir, module)\n\n if module == \"_init.py\":\n module = \"__init__.py\"\n\n out_file = os.path.join(out_dir, module)\n try:\n shutil.copy(in_file, out_file)\n except (FileNotFoundError, shutil.SameFileError) as err:\n print(err)\n installed.append(\"- \" + out_file)\n\n print(\"Basic modules:\")\n print(\"\\n\".join(installed))", "def testgen(self):\n self.parse()\n self.generate()", "def __setup_modules(self, config, db, rcontext):\n DEPTH_ROOT = 0\n DEPTH_TYPE = 1\n DEPTH_SUBTYPE = 2\n\n for root, sub_folders, files in os.walk(\"modules\"):\n nicepath = os.path.relpath(root, \"modules\")\n fullpath = root\n\n if nicepath == '.':\n depth = DEPTH_ROOT\n else:\n depth = nicepath.count(os.path.sep) + 1\n\n if depth > DEPTH_SUBTYPE:\n warnings.warn(\"sub-subdirectory in module (%s) \\\n ignored.\" % nicepath)\n\n modulenamebase = nicepath.replace(os.path.sep, '.')\n mimetype = nicepath.replace(os.path.sep, '/')\n\n if depth != DEPTH_ROOT:\n # Each folder should except root have an __init__.py,\n # otherwise the directory name be assigned as a module.\n if not \"__init__.py\" in files:\n warnings.warn(\"__init__.py not found in \\\n module folder '%s'.\" % nicepath)\n continue\n\n modulepath = 
fullpath + os.path.sep + \"__init__.py\"\n module = Module(modulepath, modulenamebase, mimetype)\n self.modules.append(module)\n\n # Now load each handler .py file\n for file in files:\n modulenameend, extension = os.path.splitext(file)\n if extension.lower() == \".py\":\n is_init = file == \"__init__.py\"\n modulepath = fullpath + os.path.sep + file\n modulename = None\n if is_init:\n modulename = modulenamebase\n elif depth == DEPTH_ROOT:\n modulename = modulenameend\n else:\n modulename = modulenamebase + '.' + modulenameend\n\n module = Module(modulepath, modulename, mimetype,\n is_global=(depth == DEPTH_ROOT),\n as_mime_handler=not is_init)\n if module.is_mime_handler and not rcontext.is_recursive:\n db.setup_module_table(module.md5_tablename,\n module.columndefinition)\n\n self.modules.append(module)", "def create_modules(self):\n self.nmos = ptx(width=self.nmos_size,\n mults=self.nmos_mults,\n tx_type=\"nmos\")\n self.add_mod(self.nmos)\n\n self.pmos = ptx(width=self.pmos_size,\n mults=self.pmos_mults,\n tx_type=\"pmos\")\n self.add_mod(self.pmos)", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def create_test_modules(nbdir, name):\n\n if os.path.exists(f'test_{name}.py'):\n ans = input(\n f'test file already exist, overwrite test_{name}.py? [y/n]')\n if ans.lower() == 'y':\n print(f'overwriting test_{name}.py')\n else:\n print('abort')\n return None\n\n notebook_lst = get_notebooks(nbdir)\n\n with open(f'test_{name}.py', 'w') as f:\n f.write(_create_test_module_heading(nbdir))\n\n nb_names = []\n for nb_path in notebook_lst:\n # check if output has to be cleared\n if os.path.split(nb_path)[-1] in _keep_output_list:\n clearoutput = False\n else:\n clearoutput = True\n\n # get proper name for function\n nb_name = os.path.splitext(os.path.split(nb_path)[-1])[0]\n nb_name = nb_name.replace(' ', '_').replace('-', '_')\n while nb_name in nb_names:\n nb_name += '_'\n nb_names.append(nb_name)\n\n # write function\n with open(f'test_{name}.py', 'a') as f:\n f.write(_create_test_func(nb_name, nb_path, clearoutput))", "def test_types_python(self):\n self.single_file_generator('py', PythonGenerator, filtr=metadata_filter)\n\n # Make sure the python is valid\n with open(os.path.join(self.source_path, 'types.py')) as f:\n pydata = f.read()\n spec = compile(pydata, 'test', 'exec')\n module = ModuleType('test')\n exec(spec, module.__dict__)", "def create_modules( package ):\n\n #we need to make the package directory.\n #we need to make the folder that this\n #parsed file will live in.\n # currentPath + package\n paths = package.split( \".\" )\n package = os.path.join( \"./\", os.path.join( *paths ) )\n os.makedirs( package )\n\n #Create the __init__.py files\n temp = \"./\"\n for p in paths:\n temp = os.path.join( temp, p )\n open( \"%s/__init__.py\" % temp, \"a\" ).close()", "def generate(self, api):\n for namespace in api.namespaces.values():\n # One module per namespace is created. 
The module takes the name\n # of the namespace.\n with self.output_to_relative_path('{}.py'.format(namespace.name)):\n self._generate_namespace_module(namespace)", "def _setup_modules(self):\r\n module_registry = AppModule.module_registry()\r\n for bundle in topological_sort(AppModule.module_dependencies()):\r\n for module_label in bundle:\r\n assert module_label in module_registry\r\n module = module_registry[module_label]\r\n self._debug_log('Initializing: %s (%s)' % (module.label(), module.description()))\r\n try:\r\n module.setup_function()\r\n except AppModule.Unimplemented:\r\n pass\r\n self._init_modules.append(module.label())", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def makemodules(self):\n names = get_unique_name_list_from_class_list(self.cls_modules)\n for cls, name in zip(self.cls_modules, names):\n self.makemodule(name, cls)", "def MODULES(self):\n pass", "def setUpModule():\n setUpAll()", "def test_make_module_text(self):\n import usercode\n usercode_sample_re = re.compile(r'^==========*\\n', re.M)\n saved_sample = usercode_sample_re.split(usercode.__doc__)[1]\n\n gcode = gencode.GenCode()\n gcode.make_module(self.schema)\n generated = gcode.get_user_text()\n self.assertEqual(generated, saved_sample, \"Generated code doesn't match sample:\\n\" +\n \"\".join(difflib.unified_diff(generated.splitlines(True),\n saved_sample.splitlines(True),\n fromfile=\"generated\",\n tofile=\"usercode.py\")))", "def _create_module(self, rootdir):\n name = 'module_' + rootdir.get_name()\n moduleobj = Module(name, rootdir)\n rootdir.set_module(moduleobj)\n self._modules[name] = moduleobj", "def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def _createModuleObj(self):\n ModuleTimeWeakening.__init__(self)\n return", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)", "def setUpModule():\n print(__doc__)\n\n # Module requirements\n ftest_module_setup()", "def generateTests(self, dict, dictname='totest',\n testmethod='test_parser'):\n for name, cases in dict.items():\n for casenum in range(len(cases)):\n case = cases[casenum]\n run_in_debugger = 0\n if len(case)==3:\n if case[2]:\n run_in_debugger = 1\n else:\n continue\n self.addTestCase(\n PythonModuleParserTestCase, testmethod,\n input=case[0], expected=case[1],\n id='%s[%r][%s]' % (dictname, name, casenum),\n run_in_debugger=run_in_debugger)", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def setUp(self):\n module = __import__(self.moduleName)\n components = self.moduleName.split('.')\n for component in components[1:]:\n module = getattr(module, component)\n \n self.suite.addTest(self._getTestSuite(module = module))", "def setUpClass(cls):\n #First get the source directory setup correctly. For these tests\n #we are going to use the config libraries instead of duplicating\n #the code. 
Only the one module being wrapped will get copied.\n cls.fdir = path.join(path.abspath(\".\"), \"ftypes\")\n cls.source = path.join(cls.fdir, \"symmetry_module.f90\")\n cls._check_dir()\n \n settings.use_filesystem_cache = False\n cls.parser = CodeParser()\n cls.parser.parse(cls.source)\n\n cls.dependencies = [\"{}_c\".format(m) for m in cls.parser.modules]\n cls.originals = list(cls.parser.modules.keys())\n cls.writers = {}\n\n #Setup the default test data the we use to run the python wrapper module\n #methods after ftypes is done.\n cls.lattice = array([[.5,.5,0],[0,.5,.5],[.5,0,.5]])\n cls.atomtypes = array([1])\n cls.basis = array([[0],[0],[0]])", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def test_swift_globals(self):\n self.build()\n self.do_test()", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def test_core_modules(testing_config):\n cache_dir = Path(testing_config.src_cache_root, \".conda-build\", \"pickled.cb\")\n perl_version = testing_config.variant.get(\n \"perl\", get_default_variant(testing_config)[\"perl\"]\n )\n core_modules = get_core_modules_for_this_perl_version(perl_version, str(cache_dir))\n assert \"Config\" in core_modules\n assert \"Module::Build\" not in core_modules", "def test_module(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"basic.html\",\n ),\n \"\",\n )\n content = self._get_fake_project_module()\n\n expected = textwrap.dedent(\n '''\\\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n\n \"\"\"A module that shows every type of documentable class / method / function.\n\n Attributes:\n ATTRIBUTE_VALUE (float):\n Some number.\n\n \"\"\"\n\n\n ATTRIBUTE_VALUE = 14.3\n\n\n class MyKlass(object):\n \"\"\"A class that does something.\n\n Multi-line information here.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"asdfasdf\"\n\n def __init__(self, value):\n \"\"\"Create this instance.\"\"\"\n # A comment that should show up in the unittest's results\n super(MyKlass, self).__init__()\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n\n class ParentClass(object):\n \"\"\"The outter class.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"tttt\"\n\n class NestedClass(object):\n \"\"\"A class within a class.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"zzzzzzzzzzzzz\"\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n\n def _set_private_function_thing(value, another):\n \"\"\"Do something here.\"\"\"\n # Do something with these values\n # and more comment text, here.\n #\n if value:\n return 2\n\n # Another comment\n return 
1\n\n\n def set_function_thing(value, another):\n \"\"\"Do something here.\"\"\"\n # Do something with these values\n # and more comment text, here.\n #\n if value:\n return 2\n\n # Another comment\n return 1'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter", "def test_1_make(self):\n #We can compile all these modules together into a single shared library.\n writer = self.writers.values()[0]\n self.code = writer.make(remake=True, dependencies=self.dependencies)\n self.assertEqual(self.code, 0)", "def generate(self):\n py_gen = PythonGenerator([], \"\", self.plugin_stub.description)\n code_fragments = {\n \"autogenerated_module_path\": self.language.wrap_directory.path,\n \"autogenerated_module\": self.language.get_python_autogenerated_module().path,\n \"new_autogenerated_module\": self.language.get_python_new_autogenerated_module().path,\n \"fix_plugin\": py_gen.make_fix_plugin(),\n \"module_namespace\": \"_madz__{}\".format(str(self.plugin_stub.id.namespace).replace(\".\", \"__\")),\n \"init_path\": self.language.get_plugin_init().path,\n \"ctypes_wrapper_path\": self.language.get_python_ctypes_wrapper().path,\n \"module_hooks\": py_gen.make_module_hook(),\n \"type_accessors\" : py_gen.make_type_accessor(None),\n \"cleanup_code\": py_gen.make_cleanup_code(None),\n \"imported_functions\": \"\",\n \"in_structs\": \"\",\n \"dep_module_hooks\": \"\",\n \"dep_cleanup_code\": \"\",\n \"imp_module_hooks\": \"\",\n \"imp_cleanup_code\": \"\",\n \"typedefs\": \"\",\n \"functions\": py_gen.make_def_function_types(),\n \"out_structs\": py_gen.make_out_struct(),\n \"plugin_cname\": self.language.output_directory.file(\"{}.madz\".format(self.plugin_stub.id.namespace)).path,\n \"function_callbacks\": py_gen.make_function_callbacks(),\n \"function_stubs\": py_gen.make_function_stubs()\n }\n\n cstdlib = {\n \"windows\": \"'MSVCRT'\",\n \"unix\": \"'c'\",\n \"osx\": \"'c'\"\n }[config_target.get(OptionPlatformOperatingSystem)]\n\n self.prep()\n self._pre_header =\"#include \\\"Python.h\\\"\\n\"\n self._post_header = py_gen.make_c_header()\n\n c_wrapgen.WrapperGenerator.generate(self)\n\n c_source = py_gen.make_c_init(self.language.get_python_code_filename())\n c_source += py_gen.make_get_out_struct()\n c_source += py_gen.make_get_python_out_struct()\n c_source += py_gen.make_c_function_stubs()\n\n all_deps = self.plugin_stub.gen_recursive_loaded_depends()\n # depends plugins python\n for dep in all_deps:\n gen = PythonGenerator([], dep.id.namespace, dep.description)\n\n code_fragments[\"imported_functions\"] += gen.make_def_function_types()\n code_fragments[\"typedefs\"] += gen.make_typedefs()\n code_fragments[\"in_structs\"] += gen.make_out_struct()\n code_fragments[\"dep_module_hooks\"] += \" \" + gen.make_module_hook()\n code_fragments[\"dep_cleanup_code\"] += \"{}\\n{}\".format(gen.make_type_accessor(False), gen.make_cleanup_code(False))\n\n c_source += gen.make_get_in_struct()\n\n # imports plugins python\n for imp in self.plugin_stub.gen_required_loaded_imports():\n if not (imp in all_deps):\n gen = PythonGenerator([], imp.id.namespace, imp.description)\n\n code_fragments[\"imported_functions\"] += gen.make_def_function_types()\n code_fragments[\"typedefs\"] += gen.make_typedefs()\n code_fragments[\"in_structs\"] += gen.make_out_struct()\n code_fragments[\"imp_module_hooks\"] += \" \" + gen.make_module_hook()\n code_fragments[\"imp_cleanup_code\"] += \"{}\\n{}\".format(gen.make_type_accessor(True), gen.make_cleanup_code(True))\n\n c_source += 
gen.make_get_in_struct()\n\n # This plugins python\n code_fragments[\"typedefs\"] += py_gen.make_typedefs()\n\n module_string = self.autogenerated_module_template.format(cstdlib = cstdlib)\n with self.language.get_python_autogenerated_module().pyopen(\"w\") as f:\n f.write(module_string)\n\n with self.language.get_python_new_autogenerated_module().pyopen(\"w\") as f:\n f.write(module_string)\n\n with self.language.get_python_ctypes_wrapper().pyopen(\"w\") as f:\n f.write(self.ctypes_wrapper_template)\n\n with self.language.get_c_code_filename().pyopen(\"a\") as f:\n f.write(\"\\n{}\\n\".format(c_source))\n\n with self.language.get_python_code_filename().pyopen(\"w\") as f:\n f.write(self.py_template.format(**code_fragments))", "def get_all_test_modules():\n test_modules = []\n current_directory = os.path.dirname(__file__)\n sys.path.insert(0, os.path.join(current_directory, '..'))\n files = sorted(os.listdir(current_directory))\n for file in files:\n if file.startswith('test') and file.endswith('.py'):\n test_modules.append(file.rstrip('.py'))\n\n return test_modules", "def setUpModule():\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)", "def apps_modules_setup(test_case):\n test_case.app.add_module(Module.new_module(\"Module0\", \"en\"))\n test_case.app.save()\n\n test_case.other_app = Application.new_app(test_case.project.name, \"OtherApp\")\n test_case.other_app.add_module(Module.new_module(\"Module0\", \"en\"))\n test_case.other_app.save()\n\n test_case.deleted_app = Application.new_app(test_case.project.name, \"DeletedApp\")\n test_case.deleted_app.add_module(Module.new_module(\"Module0\", \"en\"))\n test_case.deleted_app.save()\n test_case.deleted_app.delete_app()\n test_case.deleted_app.save() # delete_app() changes doc_type. This save() saves that.\n\n test_case.linked_app = create_linked_app(test_case.project.name, test_case.app.id,\n test_case.project.name, 'LinkedApp')\n try:\n yield\n finally:\n Application.get_db().delete_doc(test_case.linked_app.id)\n Application.get_db().delete_doc(test_case.deleted_app.id)\n Application.get_db().delete_doc(test_case.other_app.id)", "def setup_module():\n\n c = Config()\n if c.get('general', 'in_production'): # pragma: no cover\n raise RuntimeError(\"DO NOT run destructive test on production system\")\n\n \"Pull in the filesystem dump from a previous mirth run\"\n mi = MirthInteraction()\n mi.restore_database()\n\n \"Run a quick sanity check, whole module requires a populated db\"\n connection = db_connection('warehouse')\n count = connection.session.query(HL7_Msh).count()\n connection.disconnect()\n\n if count < 4000:\n err = \"Minimal expected count of records not present. 
\"\\\n \"Be sure to run 'process_testfiles_via_mirth' as a prerequisite\"\n raise RuntimeError(err)", "def test():\n\t\treturn [\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]", "def generation_db_modules():\n print(\"[root-get] DEBUG: Genearating DB for modules\")\n database = Db4pkg()\n db_manifest = database.generated_manifest()\n if not db_manifest:\n print(\"[root-get] Failed to generate DB for modules only\")\n return False\n return db_manifest", "def simulate_modules(self):\n for discrete_mod in list(self.modcells.keys()):\n # print(discrete_mod)\n # print('in simulate_modules, iterating to ', discrete_mod)\n self.simulate_module(discrete_mod)", "def setUpModule():\n # pull in test environment as dict\n global TestEnv\n get_test_env(TestEnv)", "def setUpModule():\n # pull in test environment as dict\n global TestEnv\n get_test_env(TestEnv)", "def __init__(self):\n self.modules = {}", "def setup_module():\n common_setup_module()", "def test_py_compile_basic(self):\n self._test_py_compile('basic')", "def _load_modules(self):\n modules_src = os.path.abspath(\"src/modules\")\n\n # perform a tree walk over modules directory\n for file_name, file_path in self._tree_walk(modules_src):\n try:\n # try to find a spec for this file and construct a module\n # from it\n spec = spec_from_file_location(file_name, file_path)\n assert spec is not None\n module = module_from_spec(spec)\n assert spec.loader is not None\n spec.loader.exec_module(module)\n self.modules.append(module)\n self._loaded_modules_names.append(module.__name__)\n except:\n pass", "def run(self):\n not_modules = []\n root = self.path\n 
files = []\n search_files = [\n '__openerp__.py', '__manifest__.py', 'README.rst', 'index.html']\n for sfile in search_files:\n files += subprocess.Popen(\n ['find', root, '-name', sfile],\n stdout=subprocess.PIPE).stdout.readlines()\n files.sort()\n files = [item.strip() for item in files]\n\n if os.path.isfile(os.path.join(root, '__openerp__.py')) or \\\n os.path.isfile(os.path.join(root, '__manifest__.py')):\n module_list = [os.path.basename(root)]\n root = os.path.split(root)[0]\n self.path = root\n else:\n module_list = os.walk(root).next()[1]\n if module_list.count('.git'):\n module_list.remove('.git')\n module_list.sort()\n\n for module in module_list:\n os.system('echo Generating index.html module ' + module)\n openerp_py = os.path.join(root, module, '__openerp__.py')\n readme_file = os.path.join(root, module, 'README.rst')\n index_file = os.path.join(\n root, module, 'static/description/index.html')\n\n if openerp_py not in files:\n openerp_py = os.path.join(root, module, '__manifest__.py')\n if openerp_py not in files:\n not_modules.append(module)\n continue\n\n # Get module data\n description = ''\n name, summary, description = self.get_module_data(\n openerp_py, readme_file)\n\n # Call @nhomar's script.\n html_description = rst2html.html.rst2html(description)\n\n content = self.prepare_content(html_description, name, summary)\n\n self.add_missing_dirs(index_file)\n self.add_missing_icon(module)\n\n with open(index_file, 'w') as ifile:\n ifile.write(content)\n\n if not_modules:\n for item in not_modules:\n print 'NOTE: This is not an odoo module', item\n return True", "def run():\n names=[i.__name__ for i in modList]\n res,action=kcs_ui.string_select('fake vitesse generator',\n 'Please select the module you want to generate fake vitesse py.',\n 'Press option to generate for all modules.',\n names)\n if res==kcs_util.ok():\n mod=modList[action-1]\n des=kcs_ui.string_req('Where do you want to place the file?',r'C:\\temp')\n if des[0]==kcs_util.ok():\n# des = os.path.join(os.path.join(os.getcwd(), \"FakeVitesse\"))\n fname = des[1] + \"\\\\\" + mod.__name__ + \".py\"\n GenPy(mod, fname)\n elif res==kcs_util.options():\n des=kcs_ui.string_req('Where do you want to place the file?',r'C:\\temp')\n if des[0]==kcs_util.ok():\n for mod in modList:\n fname = des[1] + \"\\\\\" + mod.__name__ + \".py\"\n GenPy(mod, fname)", "def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())", "def _importtestmodule(self):\n module = make_module_from_function(self.funcobj)\n copy_markinfo(module, self.funcobj)\n merge_pytestmark(module, self.parent.obj)\n return module", "def process_module_list(self, modules):", "def setup():\n find_modules('alerters')\n find_modules('watchers')\n find_modules('auditors')", "def enaml_module(tmpdir):\n name = '__enaml_test_module__'\n folder = str(tmpdir)\n path = os.path.join(folder, name + '.enaml')\n with open(path, 'w') as f:\n f.write(SOURCE)\n sys.path.append(folder)\n\n yield name, folder, path\n\n sys.path.remove(folder)\n if name in sys.modules:\n del sys.modules[name]", "def setUpModule():\n global primary_ecu_key\n global key_timeserver_pub\n global key_timeserver_pri\n global clock\n\n \n\n # Load the private key for this Primary ECU.\n key_pub = demo.import_public_key('primary')\n key_pri = demo.import_private_key('primary')\n primary_ecu_key = uptane.common.canonical_key_from_pub_and_pri(\n key_pub, key_pri)\n\n # Load the public 
timeserver key.\n key_timeserver_pub = demo.import_public_key('timeserver')\n key_timeserver_pri = demo.import_private_key('timeserver')\n\n # Generate a trusted initial time for the Primary.\n clock = tuf.formats.unix_timestamp_to_datetime(int(time.time()))\n clock = clock.isoformat() + 'Z'\n tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(clock)\n\n # Currently in development.\n\n # Start the timeserver, director, and oem repo for this test,\n # using subprocesses, and saving those processes as:\n #process_timeserver\n #process_director\n #process_oemrepo\n # to be stopped in tearDownModule below.", "def tests_generator(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n cmd_list = [\n (\"Initial Create/Compile/Read Compiled Tree\", \"{0} -D {1} -i 10 --makej -s {2}\"),\n ]\n\n tests = []\n for idx, (desc, cmd) in enumerate(cmd_list):\n test_name = \"compile_bench_{0}_{1}\".format(idx + 1, to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=cmd.format(cb_bin, self.test_path, bin_path))\n tests.append(test)\n return tests", "def _create_test_module_heading(nbdir):\n heading = 'import os\\n'\\\n 'import sys\\n'\\\n 'import test_func as tf\\n\\n'\\\n '#make projectdir accessible inside this script\\n'\\\n 'tst_dir = os.path.dirname(os.path.realpath(__file__))\\n'\\\n f'nb_dir = os.path.join(tst_dir, \"..\", *{os.path.split(nbdir)})\\n' # noqa E501\n\n return heading", "def module_name():\n new_module = 'test_module'\n yield new_module\n shutil.rmtree(new_module, ignore_errors=True)", "def gen_module(root_path, walls_height=3, floor_thickness=.3):\n levels = [gen_level(root_path + lv.rstrip() + '/')(\n floor_thickness = floor_thickness,\n walls_height = walls_height)\n for lv in os.popen('ls ' + root_path)]\n \n walls_hpc = []\n windows_hpc = []\n doors_hpc = []\n handrails_hpc = []\n floors_hpc = []\n stairs_foots = []\n lv = 0\n for walls, windows, doors, handrails, floor, stair_foot in levels:\n level_height = walls_height * lv\n \n walls_hpc.append(T(3)(level_height)(walls))\n windows_hpc.append(T(3)(level_height)(windows))\n doors_hpc.append(T(3)(level_height + floor_thickness)(doors))\n handrails_hpc.append(T(3)(level_height)(handrails))\n floors_hpc.append(T(3)(level_height)(floor))\n \n stairs_foots.append(stair_foot+[level_height])\n \n lv += 1\n \n walls_hpc = UNION(walls_hpc)\n windows_hpc = UNION(windows_hpc)\n doors_hpc = STRUCT(doors_hpc)\n handrails_hpc = UNION(handrails_hpc)\n floors_hpc = UNION(floors_hpc)\n \n cubes_hpc = []\n stairs_hpc = []\n for i in range(0, len(stairs_foots), 2):\n stair, cube = gen_stairs(stairs_foots[i], stairs_foots[i+1])\n cubes_hpc.append(cube)\n stairs_hpc.append(T(3)(floor_thickness)(stair))\n \n stairs_hpc = STRUCT(stairs_hpc)\n \n cubes_hpc = T(3)(floor_thickness)(STRUCT(cubes_hpc))\n floors_hpc = DIFFERENCE([floors_hpc, cubes_hpc])\n \n return STRUCT([\n SKEL_1(walls_hpc),\n windows_hpc,\n doors_hpc,\n handrails_hpc,\n floors_hpc,\n stairs_hpc])", "def setUpModule(): # noqa\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer()", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n 
swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def test_main_modular(tmpdir_factory: TempdirFactory) -> None:\n\n output_directory = Path(tmpdir_factory.mktemp('output'))\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n output_path = output_directory / 'model'\n\n with freeze_time(TIMESTAMP):\n main(['--input', str(input_filename), '--output', str(output_path)])\n main_modular_dir = EXPECTED_MAIN_PATH / 'main_modular'\n for path in main_modular_dir.rglob('*.py'):\n result = output_path.joinpath(path.relative_to(main_modular_dir)).read_text()\n assert result == path.read_text()", "def test_class_module_names(dependency_testing_model) -> None:\n expected_modules = {\n 'builtins',\n 'calendar',\n 'click',\n 'cloudpickle',\n 'collections',\n 'datetime',\n 'google',\n 'json',\n 'numpy',\n 'pandas',\n 'PIL',\n 'requests',\n 'requests',\n 'sklearn',\n 'torch',\n 'typing',\n 'urllib3',\n 'verta',\n 'yaml',\n }\n extracted_modules: Set[str] = md.class_module_names(dependency_testing_model)\n assert set(extracted_modules) == set(expected_modules)", "def _generate_examples(self, folders, split):\n raise NotImplementedError(\"TODO\")", "def _createModuleObj(self):\n ModuleFaultCohesiveKin.__init__(self)\n return", "def setUp(self):\n self.sampler = {\n \"name\": \"samplername\",\n \"backend_name\": \"\",\n \"backend_header\": \"\",\n \"backend_prefix\": \"\",\n \"backend_suffix\": \"\",\n \"backend_footer\": \"\",\n \"ncores\": 2,\n \"threads_per_core\": 1,\n \"omp_enabled\": True,\n \"papi_enabled\": True,\n \"papi_counters_max\": 2,\n \"papi_counters_avail\": (\"C1\", \"C2\", \"C3\"),\n \"kernels\": {\"dgemm\": (\n 'dgemm', 'char*', 'char*', 'int*', 'int*', 'int*', 'double*',\n 'double*', 'int*', 'double*', 'int*', 'double*', 'float*',\n 'int*'\n )},\n \"nt_max\": random.randint(1, 10),\n \"exe\": \"x\"\n }\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")\n self.k = Symbol(\"k\")\n self.ns = [random.randint(1, 100) for _ in range(5)]", "def test_add_default_modules(mock_get_dm, mock_hc, require_platform_and_default_arch):\n mmd = load_mmd(read_staged_data(\"formatted_testmodule.yaml\"))\n xmd_brs = mmd.get_xmd()[\"mbs\"][\"buildrequires\"]\n assert set(xmd_brs.keys()) == {\"platform\"}\n\n platform = ModuleBuild.get_build_from_nsvc(\n db_session,\n \"platform\",\n xmd_brs[\"platform\"][\"stream\"],\n xmd_brs[\"platform\"][\"version\"],\n xmd_brs[\"platform\"][\"context\"],\n )\n assert platform\n platform_mmd = platform.mmd()\n platform_xmd = mmd.get_xmd()\n platform_xmd[\"mbs\"][\"use_default_modules\"] = True\n platform_mmd.set_xmd(platform_xmd)\n platform.modulemd = mmd_to_str(platform_mmd)\n\n dependencies = [\n {\"requires\": {\"platform\": [\"f28\"]},\n \"buildrequires\": {\"platform\": [\"f28\"]}}]\n make_module_in_db(\"python:3:12345:1\", base_module=platform, dependencies=dependencies)\n make_module_in_db(\"nodejs:11:2345:2\", base_module=platform, dependencies=dependencies)\n db_session.commit()\n\n mock_get_dm.return_value = {\n \"nodejs\": \"11\",\n \"python\": \"3\",\n \"ruby\": \"2.6\",\n }\n defaults_added = default_modules.add_default_modules(mmd)\n # Make sure that the default modules were added. 
ruby:2.6 will be ignored since it's not in\n # the database\n assert set(mmd.get_xmd()[\"mbs\"][\"buildrequires\"].keys()) == {\"nodejs\", \"platform\", \"python\"}\n mock_get_dm.assert_called_once_with(\n \"f28\",\n \"https://pagure.io/releng/fedora-module-defaults.git\",\n )\n assert \"ursine_rpms\" not in mmd.get_xmd()[\"mbs\"]\n assert defaults_added is True", "def _get_modules(self) -> Dict[str, ModuleType]:\n modules = {}\n terminal_path = Path(openbb_terminal.__file__).parent\n\n for file in terminal_path.glob(\"**/*controller.py\"):\n spec = spec_from_file_location(file.stem, file)\n if spec is not None and spec.loader is not None:\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n ctrl_path = (\n str(file)\n .replace(str(terminal_path), \"\")\n .replace(\"\\\\\", \"/\")\n .split(\"/\")[1:]\n )\n for sub_name, abbr in sub_folders_abbr.items():\n ctrl_path = [\n path.lower().replace(sub_name, abbr) for path in ctrl_path\n ]\n\n trailmap = \".\".join(ctrl_path[:-1])\n if trailmap not in modules:\n modules[trailmap] = module\n\n return modules", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def main(argv=None):\n\t#Parse arguments\n\tparser = argparse.ArgumentParser(description='Python Unit Test Stub Generator')\n\n\tparser.add_argument('module', help='The path of the module to test.')\n\n\tparser.add_argument('-F', '--footer',\n\t\thelp='File to use as a footer.')\n\tparser.add_argument('-H', '--header',\n\t\thelp='File to use as a header.')\n\tparser.add_argument('-X', '--exclude', action='append', default=[],\n\t\thelp='Add a child directory name to exclude.')\n\n\tparser.add_argument('-f', '--force', action='store_true',\n\t\thelp='Force files to be generated, even if they already exist.')\n\tparser.add_argument('-i', '--internal', action='store_true',\n\t\thelp='Include internal classes and methods starting with a _.')\n\tparser.add_argument('-m', '--test-module', default='test',\n\t\thelp='The path of the test module to generate.')\n\tparser.add_argument('-p', '--test-prefix', default='test_',\n\t\thelp='The prefix for test files.')\n\tparser.add_argument('-t', '--tab-width', type=int,\n\t\thelp='The width of a tab in spaces (default actual tabs).')\n\n\tif argv is None:\n\t\targv = sys.argv\n\targuments = parser.parse_args(argv[1:])\n\n\t#Open the header and footer\n\theader = ''\n\tfooter = ''\n\tif arguments.header is not None:\n\t\twith open(arguments.header) as headerFile:\n\t\t\theader = headerFile.read()\n\tif arguments.footer is not None:\n\t\twith open(arguments.footer) as footerFile:\n\t\t\tfooter = footerFile.read()\n\n\t#Walk the directory finding Python files\n\tfor root, _, fileNames in os.walk(arguments.module):\n\t\tfor fileName in fileNames:\n\t\t\t#Skip ignored directories\n\t\t\t_, childDirectory = os.path.split(root)\n\t\t\tif childDirectory in arguments.exclude:\n\t\t\t\tcontinue\n\n\t\t\t#Generate unit test, skipping ignored files\n\t\t\tunitTest = Generator.generateUnitTest(root, fileName, arguments.internal)\n\t\t\tif unitTest is None:\n\t\t\t\tcontinue\n\n\t\t\t#Replace tabs\n\t\t\tif arguments.tab_width is not None:\n\t\t\t\tunitTest = unitTest.replace('\\t', ' ' * arguments.tab_width)\n\n\t\t\t#Add header and footer\n\t\t\tunitTest = header + unitTest + footer\n\n\t\t\t#Write it\n\t\t\toutFile = '%s%s' % (arguments.test_prefix, 
fileName)\n\t\t\toutFolder = arguments.test_module\n\t\t\tif not os.path.exists(outFolder):\n\t\t\t\tos.makedirs(outFolder)\n\n\t\t\t#TODO: do this at every level\n\t\t\ttestInit = os.path.join(outFolder, '__init__.py')\n\t\t\tif not os.path.exists(testInit):\n\t\t\t\twith open(testInit, 'w') as testInitFile:\n\t\t\t\t\ttestInitFile.write('')\n\n\t\t\toutPath = os.path.join(outFolder, outFile)\n\t\t\tif arguments.force or not os.path.exists(outPath) or os.stat(outPath).st_size == 0:\n\t\t\t\tprint('[%s] Writing...' % outPath)\n\t\t\t\twith open(outPath, 'w') as outFile:\n\t\t\t\t\toutFile.write(unitTest)\n\t\t\telse:\n\t\t\t\tprint('[%s] Already exists' % outPath)\n\n\treturn 0", "def test_ifPythonModuleIsInstalled():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"pyModule\" in testConfig.config:\n print \"pyModule: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfPythonModuleIsInstalled, testConfig.config", "def test_make_module(self):\n gcode = gencode.GenCode()\n gcode.make_module(self.schema)\n module = gcode.usercode\n self.assertTrue(isinstance(module.Students, table.UserTable))\n self.assertTrue(issubclass(module.Students.Record, records.Record))\n self.assertTrue(issubclass(module.Students.RecordSet, records.RecordSet))\n self.assertIs(module.Students.RecordSet.Record, module.Students.Record)", "def test_module_initialization(self):\n m = Module('foo')\n assert str(m) == 'foo'\n\n m = Module('foo.bar')\n assert str(m) == 'foo.bar'\n\n m = Module('foo.bar.qux')\n assert str(m) == 'foo.bar.qux'", "def get_module_info():\n\n return {RUNNER_NAME: ('mock runner', MockRunner)}", "def setUp(self):\n self.dbus_mock = MagicMock()\n self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n }\n self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import tools\n self.module_under_test = tools", "def tests():", "def generate_sample_cdk_tsc_module(env_root, module_dir=None):\n if module_dir is None:\n module_dir = os.path.join(env_root, 'sampleapp.cdk')\n generate_sample_module(module_dir)\n for i in ['.npmignore', 'cdk.json', 'package.json', 'runway.module.yml',\n 'tsconfig.json', 'README.md']:\n shutil.copyfile(\n os.path.join(ROOT,\n 'templates',\n 'cdk-tsc',\n i),\n os.path.join(module_dir, i),\n )\n for i in [['bin', 'sample.ts'], ['lib', 'sample-stack.ts']]:\n os.mkdir(os.path.join(module_dir, i[0]))\n shutil.copyfile(\n os.path.join(ROOT,\n 'templates',\n 'cdk-tsc',\n i[0],\n i[1]),\n os.path.join(module_dir, i[0], i[1]),\n )\n with open(os.path.join(module_dir, '.gitignore'), 'w') as stream:\n stream.write('*.js\\n')\n stream.write('*.d.ts\\n')\n stream.write('node_modules\\n')\n LOGGER.info(\"Sample CDK module created at %s\", module_dir)\n LOGGER.info('To finish its setup, change to the %s directory and execute '\n '\"npm install\" to generate its lockfile.', module_dir)", "def find_all_test_files():\n #test_file_pattern = re.compile('^t(est)?_.*\\.py$')\n test_file_pattern = re.compile('.*_test\\.py$')\n is_test_file = lambda filename: test_file_pattern.match(filename)\n drop_dot_py = lambda filename: filename[:-3]\n join_module = lambda *names: '/'.join(names)\n\n modules = []\n for root, dirs, files in os.walk(os.curdir):\n root_name = 
os.path.split(root)[-1]\n for test_file in filter(is_test_file, files):\n module = join_module(root_name, drop_dot_py(test_file))\n modules.append(module)\n #modules += ['.'.join([root_name, drop_dot_py(test_file)]) for test_file in filter(is_test, files)]\n return modules", "def setUpModule():\n logging.basicConfig()\n # logPoint('module %s' % __name__)", "def test_package(self):\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def generate(module_name, module_path, target_dir):\n if not (Path(module_path) / 'builtins.stub.py').exists():\n copy(Path(__file__).parent.parent / 'stubs/builtins.stub.py', module_path)\n build_swift_wrappers_module(module_name, module_path, target_dir)", "def process_modules(self) -> typing.NoReturn:\n\t\tfor moduleName in self.moduleNameSet:\n\t\t\tdetected_encoding = detect_encoding(moduleName)\n\n\t\t\tprint(f\"Processing {moduleName} ({detected_encoding})\")\n\n\t\t\twith open(moduleName, 'r+', encoding=detected_encoding) as fileStream:\n\t\t\t\t# Store the content of the file\n\t\t\t\tfileContent: str = fileStream.read()\n\t\t\t\t# Sets the file's current position at the offset, the position of the read/write pointer within the file\n\t\t\t\tfileStream.seek(0, 0)\n\t\t\t\t# Truncates the file's size\n\t\t\t\tfileStream.truncate()\n\n\t\t\t\t# Process regex patterns\n\t\t\t\tfor regexDict in regexDictList:\n\t\t\t\t\tfileContent = self.process_function(regexDict, fileContent)\n\n\t\t\t\t# Rewrite the processed content of the file\n\t\t\t\tfileStream.write(fileContent)", "def setUp(self):\n # After stage1:\n # TODO: use this form after implementing a fixer to consolidate\n # __future__ imports into a single line:\n # self.headers1 = \"\"\"\n # from __future__ import absolute_import, division, print_function\n # \"\"\"\n self.headers1 = reformat_code(\"\"\"\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n \"\"\")\n\n # After stage2 --all-imports:\n # TODO: use this form after implementing a fixer to consolidate\n # __future__ imports into a single line:\n # self.headers2 = \"\"\"\n # from __future__ import (absolute_import, division,\n # print_function, unicode_literals)\n # from future import standard_library\n # from future.builtins import *\n # \"\"\"\n self.headers2 = reformat_code(\"\"\"\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n from __future__ import unicode_literals\n from future import standard_library\n standard_library.install_aliases()\n from builtins import *\n \"\"\")\n self.interpreters = [sys.executable]\n self.tempdir = tempfile.mkdtemp() + os.path.sep\n pypath = os.getenv('PYTHONPATH')\n if pypath:\n self.env = {'PYTHONPATH': os.getcwd() + os.pathsep + pypath}\n else:\n self.env = {'PYTHONPATH': os.getcwd()}", "def _make_modules(is_train):\n return {\n 'conversion': functools.partial(\n conversion, is_train=is_train, is_extrapolation=False),\n 'time': functools.partial(time, is_train=is_train),\n }", "def morepath_modules(cls: type[morepath.App]) -> 'Iterator[str]':\n for base in cls.__mro__:\n if not issubclass(base, morepath.App):\n continue\n\n if base is morepath.App:\n continue\n\n module = '.'.join(base.__module__.split('.')[:2])\n\n if module.startswith('test'):\n continue\n\n yield module", "def __ensure_testcase_module(path: Text) -> NoReturn:\n init_file = os.path.join(os.path.dirname(path), \"__init__.py\")\n if os.path.isfile(init_file):\n 
return\n\n with open(init_file, \"w\", encoding=\"utf-8\") as f:\n f.write(\"# NOTICE: Generated By HttpRunner. DO NOT EDIT!\\n\")", "def build_dynamic_module(self):\r\n self.code_gen()\r\n\r\n mod = cmodule.DynamicModule()\r\n\r\n # The code of instantiate\r\n # the 1 is for error_storage\r\n code = self.instantiate_code(1 + len(self.args))\r\n instantiate = cmodule.ExtFunction('instantiate', code,\r\n method=cmodule.METH_VARARGS)\r\n #['error_storage'] + argnames,\r\n #local_dict = d,\r\n #global_dict = {})\r\n\r\n # Static methods that can run and destroy the struct built by\r\n # instantiate.\r\n if PY3:\r\n static = \"\"\"\r\n static int {struct_name}_executor({struct_name} *self) {{\r\n return self->run();\r\n }}\r\n\r\n static void {struct_name}_destructor(PyObject *capsule) {{\r\n {struct_name} *self = ({struct_name} *)PyCapsule_GetContext(capsule);\r\n delete self;\r\n }}\r\n \"\"\".format(struct_name=self.struct_name)\r\n else:\r\n static = \"\"\"\r\n static int %(struct_name)s_executor(%(struct_name)s* self) {\r\n return self->run();\r\n }\r\n\r\n static void %(struct_name)s_destructor(void* executor, void* self) {\r\n delete ((%(struct_name)s*)self);\r\n }\r\n \"\"\" % dict(struct_name=self.struct_name)\r\n\r\n # We add all the support code, compile args, headers and libs we need.\r\n for support_code in self.support_code() + self.c_support_code_apply:\r\n mod.add_support_code(support_code)\r\n mod.add_support_code(self.struct_code)\r\n mod.add_support_code(static)\r\n mod.add_function(instantiate)\r\n for header in self.headers():\r\n mod.add_include(header)\r\n for init_code_block in self.init_code() + self.c_init_code_apply:\r\n mod.add_init_code(init_code_block)\r\n\r\n return mod", "def _createModuleObj(self):\n ModuleTimeHistory.__init__(self)", "def test_get_leaf_modules(request):\n filename = request.module.__file__\n qalgebra_dir = os.path.join(\n os.path.split(filename)[0], '..', 'src', 'qalgebra'\n )\n modules = get_leaf_modules(qalgebra_dir)\n assert \"qalgebra.core.abstract_algebra\" in modules", "def testmodule():\n import doctest\n import sys\n thismodule = sys.modules[__name__]\n return doctest.testmod(m=thismodule)", "def testmodule():\n import doctest\n import sys\n thismodule = sys.modules[__name__]\n return doctest.testmod(m=thismodule)", "def prepare_gen(self, targets):\r\n pass" ]
[ "0.7218219", "0.72155625", "0.70887715", "0.6988405", "0.6895615", "0.66870666", "0.6605496", "0.6373421", "0.6339255", "0.63342136", "0.63128173", "0.6293195", "0.6291521", "0.62130636", "0.62130636", "0.61693025", "0.6134017", "0.613319", "0.61327916", "0.6114747", "0.6091289", "0.60851675", "0.6083036", "0.60827166", "0.6068651", "0.60674274", "0.60469526", "0.60315585", "0.5988351", "0.59861535", "0.5984465", "0.59647846", "0.59647846", "0.59621453", "0.5947603", "0.59345037", "0.5919532", "0.58960015", "0.5881558", "0.5880458", "0.58796763", "0.5870334", "0.5866289", "0.5864555", "0.58423287", "0.5827894", "0.5827681", "0.5820485", "0.58202225", "0.58158904", "0.58158904", "0.58119595", "0.5804857", "0.5802697", "0.579772", "0.5788233", "0.5782435", "0.57817703", "0.5770859", "0.5765717", "0.57628906", "0.57524645", "0.57475203", "0.5746898", "0.5744027", "0.5740899", "0.57378536", "0.5737342", "0.57368726", "0.5736691", "0.57193124", "0.57159305", "0.5712166", "0.5710592", "0.5706659", "0.57017285", "0.569913", "0.5697353", "0.5683245", "0.56806827", "0.5672867", "0.5668579", "0.5658773", "0.56532395", "0.56335723", "0.5629894", "0.56227803", "0.56156856", "0.5614662", "0.5612925", "0.55928266", "0.5588357", "0.55826956", "0.5575435", "0.5573181", "0.5566584", "0.55595237", "0.5549457", "0.5548676", "0.5548676", "0.553486" ]
0.0
-1
This method uses the original test data and compiled runtime.Modules to run in the test runner to verify the results.
def run_and_check( models: List[AOTCompiledTestModel], runner: AOTTestRunner, interface_api: str, debug_calculated_workspaces=False, workspace_byte_alignment=8, constant_byte_alignment=8, data_linkage: AOTDataLinkage = None, test_dir: str = None, verbose: bool = False, use_workspace_io: bool = False, debug_last_error: bool = False, checker: Optional[Callable[[str], bool]] = None, ): def run_and_check_body(base_path): cflags = ( f"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} " f" -DTVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES={constant_byte_alignment} " ) # The calculated workspaces will not account for stack allocator tags used for debugging if debug_calculated_workspaces: cflags += "-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK " base_path = os.path.abspath(base_path) build_path = os.path.join(base_path, "build") os.makedirs(build_path, exist_ok=True) include_path = os.path.join(base_path, "include") os.mkdir(include_path) tvm.micro.copy_crt_config_header("crt", include_path) workspace_bytes = 0 for compiled_model in models: model = compiled_model.model tar_file = os.path.join(base_path, f"{model.name}.tar") export_model_library_format(compiled_model.executor_factory, tar_file) t = tarfile.open(tar_file) t.extractall(base_path) # Interface C APIs does not need compiler generated # workspace to generate the test application, because # workspace size is codegen'd as a macro to # tvmgen_<model_name>.h. if interface_api != "c": workspace_bytes += mlf_extract_workspace_size_bytes(tar_file) workspace_bytes += model.extra_memory_in_bytes for key in model.inputs: sanitized_tensor_name = re.sub(r"\W", "_", key) _create_header_file( f'{_mangle_name(model.name, "input_data")}_{sanitized_tensor_name}', model.inputs[key], include_path, data_linkage, ) for key in model.outputs: sanitized_tensor_name = re.sub(r"\W", "_", key) _create_header_file( f'{_mangle_name(model.name, "output_data")}_{sanitized_tensor_name}', np.zeros(model.outputs[key].shape, model.outputs[key].dtype), include_path, data_linkage, ) _create_header_file( f'{_mangle_name(model.name, "expected_output_data")}_{sanitized_tensor_name}', model.outputs[key], include_path, data_linkage, ) use_usmp = runner.pass_config.get("tir.usmp.enable", False) # We only need the stack allocator if USMP is not used use_stack_allocator = not use_usmp _create_main( "test.c", models, build_path, runner.includes, runner.prologue, runner.epilogue, data_linkage, interface_api, workspace_bytes, use_stack_allocator, use_workspace_io, debug_last_error, ) if checker and (not checker(base_path)): return False # Verify that compiles fine file_dir = os.path.dirname(os.path.abspath(__file__)) makefile_dir = os.path.join(file_dir, "../../../tests/python/relay/aot") codegen_path = os.path.join(base_path, "codegen") makefile = os.path.join(makefile_dir, f"{runner.makefile}.mk") fvp_dir = "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/" # TODO(@grant-arm): Remove once ci_cpu docker image has been updated to FVP_Corstone_SSE if not os.path.isdir(fvp_dir): fvp_dir = "/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/" custom_params = " ".join( [f" {param}='{value}'" for param, value in runner.parameters.items()] ) make_command = ( f"make -f {makefile} build_dir={build_path}" + f" CFLAGS='{cflags}'" + f" TVM_ROOT={file_dir}/../../.." 
+ f" AOT_TEST_ROOT={makefile_dir}" + f" CODEGEN_ROOT={codegen_path}" + f" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}" + f" FVP_DIR={fvp_dir}" + custom_params ) compile_log_path = os.path.join(build_path, "test_compile.log") compile_command = f"{make_command} aot_test_runner" if verbose: print("Compile command:\n", compile_command) _subprocess_check_log_output(compile_command, ".", compile_log_path) # Verify that runs fine run_log_path = os.path.join(build_path, "test_run.log") run_command = f"{make_command} run" if verbose: print("Run command:\n", run_command) _subprocess_check_log_output(run_command, build_path, run_log_path) with open(run_log_path) as run_log: assert AOT_SUCCESS_TOKEN in run_log.read() return True if test_dir is None: tmpdir = utils.tempdir() return run_and_check_body(os.path.join(tmpdir.path, "test")) else: return run_and_check_body(test_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTests(self):\n \n pass", "def runtest(self):", "def test_process_data(self):\n pass", "def test1():\n for test in pkl.load(open(TEST_RESOURCES_DIR / \"regression_vault.pkl\", \"rb\"))[:5]:\n init_dict, rslt = test\n np.testing.assert_array_equal(run_regression_test(init_dict), rslt)", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def run(self):\n if self.verbose:\n print(f'Running {self.name} tests...')\n\n # try running setup if there is one\n if self.setup:\n self.__process_setup()\n\n final_report = [None] * len(self.tests)\n\n for test_in, test_out in sorted(self.tests.items()):\n # increment total num of tests\n self.total += 1\n\n if self.verbose:\n print(f'#{self.total}')\n\n # evaluate test input w/ setup vars, if any\n try:\n inp = eval(test_in, self.vars)\n except Exception as err:\n print(f'Issue during evaluation of test input: {err}')\n final_report[self.total - 1] = 'input eval error'\n if self.verbose:\n print(f'Test input was: {test_in}')\n print('Vars from execution: {}'.format({k : v for k, v in self.vars.items() if k != '__builtins__'}))\n continue\n\n \n # checking if function input has more than one arg\n if type(inp) in (list, tuple):\n try:\n student_out = self.student_function(*inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Inputs were: {inp}')\n continue\n else:\n try:\n student_out = self.student_function(inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Input was: {inp}')\n continue\n\n # ans alias for ease of answer checking\n self.vars['ans'] = student_out\n\n if self.schema:\n format_vals = eval(test_out, self.vars)\n results, maybe_failed_schema = self.__process_schema(format_vals)\n if all(results):\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed at least one of the tests\n failed_str = \" and \".join([\", \".join(maybe_failed_schema[:-1]),maybe_failed_schema[-1]] if len(maybe_failed_schema) > 2 else maybe_failed_schema)\n final_report[self.total - 1] = f'FAILED; failed following assertion(s): {failed_str}'\n else:\n expected_ans = eval(test_out, self.vars)\n if student_out == expected_ans:\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed the only test\n final_report[self.total - 1] = f'FAILED; got {repr(student_out)} but expected {repr(expected_ans)}'\n\n # run callback function, if there is one\n if self.callback:\n if self.verbose:\n print('Running callback...')\n print('call back is:', self.callback)\n\n # once done, put the final report on the queue\n self.queue.put((self.student_username, self.name, f'{self.correct}/{self.total}', final_report))", "def run_tests(self):\n raise NotImplementedError", "def run_test(self):\n raise NotImplementedError", "def runTest(self):\n return True", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n 
self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def run_all_unit_tests():\n original = verify.parse_content\n try:\n verify.parse_content = parse_string_in_scope\n\n test_list_of()\n\n test_activity_multiple_choice()\n test_activity_free_text()\n test_activity_multiple_choice_group()\n test_activity_ast()\n\n test_assessment()\n test_assessment_ast()\n\n # test existing verifier using parsing instead of exec/compile\n verify.test_sample_assets()\n finally:\n verify.parse_content = original", "def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])", "def test_module(self):\n pass", "def test(self, dataset):\n\n outputs, errors = self.use(dataset)\n\n ## PUT CODE HERE ##\n # I put the code in the \"use\" function, seems better :-)\n\n return outputs, errors", "def run():\n tests = [\n # [\n # [\"\", \"sumRange\", \"sumRange\", \"sumRange\", \"update\", \"sumRange\", \"sumRange\", \"sumRange\"],\n # [[1, 2, 3, 4, 5], [0, 2], [1, 2], [1, 3], [1, 3], [0, 2], [1, 2], [0, 1]]\n # ],\n [\n [\"NumArray\", \"sumRange\", \"update\", \"sumRange\"],\n [[-1], [0, 0], [0, 1], [0, 0]]\n ]\n ]\n\n results = [\n # [None, 3, 2, 5, None, 4, 2, 2],\n [None, -1, None, 1],\n\n ]\n for test, res in zip(tests, results):\n print('-----'*20)\n cmds, params = test\n cmds.pop(0)\n p = params.pop(0)\n res.pop(0)\n obj = BIT(p)\n for cmd, param, exp in zip(cmds, params, res):\n v = getattr(obj, cmd)(*param)\n print(obj)\n print(f'------\\n{cmd}, {param} --> {v} : {exp}')\n assert exp == v", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def 
test_generate_all_testing(self):\n pass", "def test(self):\n self.eval()", "def getValidTests(sourceTree):\n\n tests = getSections()\n newTests = tests[:]\n\n # [main] is reserved for test suite parameters\n newTests.remove(\"main\")\n\n removeList = []\n \n for test in newTests:\n\n print \" \"\n print \"checking parameters for test %s\" % (test)\n \n # check for the manditory parameters\n if (not (keyIsValid(\"%s.buildDir\" % (test)) and\n keyIsValid(\"%s.inputFile\" % (test)) and\n (sourceTree == \"fParallel\" or \n keyIsValid(\"%s.probinFile\" % (test)) ) and\n keyIsValid(\"%s.needs_helmeos\" % (test)) and\n keyIsValid(\"%s.dim\" % (test)) ) ):\n warning(\" WARNING: manditory runtime parameters for test %s not set\" % (test))\n warning(\" skipping test\")\n removeList.append(test)\n continue\n\n\n # check for optional parameters\n\n # restartTest\n if (not keyIsValid(\"%s.restartTest\" % (test)) ):\n warning(\" Assuming test is not restart run.\\n\")\n globalParams[\"%s.restartTest\" % (test)] = 0\n else:\n\n if (getParam(\"%s.restartTest\" % (test)) ):\n\n # make sure that the file number to restart from is defined\n if (not keyIsValid(\"%s.restartFileNum\" % (test)) ):\n warning(\"WARNING: test %s is a restart test, but is missing the restartFileNum parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # compileTest\n if (not keyIsValid(\"%s.compileTest\" % (test)) ):\n warning(\" Assuming test is not compilation test run.\\n\")\n globalParams[\"%s.compileTest\" % (test)] = 0\n\n\n # selfTest\n if (not keyIsValid(\"%s.selfTest\" % (test)) ):\n warning(\" Assuming test is not a self-test.\\n\")\n globalParams[\"%s.selfTest\" % (test)] = 0\n else:\n\n if (getParam(\"%s.selfTest\" % (test)) ):\n \n # make sure that the success string is defined\n if (not keyIsValid(\"%s.stSuccessString\" % (test)) ):\n warning(\"WARNING: test %s is a self-test, but is missing stSuccessString parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n\n\n # useMPI\n if (not keyIsValid(\"%s.useMPI\" % (test)) ):\n warning(\" Assuming normal (not MPI) run.\\n\")\n globalParams[\"%s.useMPI\" % (test)] = 0\n else:\n\n if (getParam(\"%s.useMPI\" % (test)) ):\n\n # make sure that the number of processors is defined\n if (not keyIsValid(\"%s.numprocs\" % (test)) ):\n warning(\"WARNING: test %s is a parallel test, but did not specify the numprocs parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # doVis\n if (not keyIsValid(\"%s.doVis\" % (test)) ):\n warning(\" Assuming no visualization.\\n\")\n globalParams[\"%s.doVis\" % (test)] = 0\n else:\n\n if (getParam(\"%s.doVis\" % (test)) ):\n\n # find out what variable to plot\n if (not keyIsValid(\"%s.visVar\" % (test)) ):\n warning(\"WARNING: test %s requested visualization but did not specify the visVar parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # remove the invalid tests\n for test in removeList:\n newTests.remove(test)\n \n return newTests", "def tests():", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n 
subproblem=\"\",\n stage=\"\",\n )", "def test_data_manipulation(self):\n target_name = self.project['target']['name']\n self.api_mock.return_value.get_metadata.return_value = [\n {'_id': '0',\n 'pid': '1',\n 'created': datetime.datetime.now(),\n 'name':'universe',\n 'originalName': 'credit-sample-200.csv',\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '1',\n 'pid': '1',\n 'name':'test',\n 'originalName': 'credit-sample-200.csv',\n 'created': datetime.datetime.now(),\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '2',\n 'pid': '1',\n 'name':'new',\n 'created': datetime.datetime.now(),\n 'originalName': 'credit-sample-200.csv',\n 'newdata':True,\n 'controls':{},\n 'shape': [2, 100],\n 'varTypeString': 'NN',\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}}]\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1',\n 'command': 'fit', 'max_reps': 0,\n 'samplepct': 100})\n\n #target\n #this will map the target values to (0,1) because target type is Binary\n target_vector = self.dataprocessor.target_vector()\n target_series = target_vector['main']\n self.assertItemsEqual(np.unique(target_series), [0,1])\n\n #this will be none because 'holdout_pct' isn't set in the project data\n self.assertIsNone(target_vector['holdout'])\n\n #prediction dataset\n predictors = self.dataprocessor.predictors()\n pred_dataframe = predictors['1']['main']\n self.assertItemsEqual(list(pred_dataframe.columns), [\"age\"])\n self.assertEqual(self.dataprocessor.get_vartypestring_without_target('1'), \"N\")\n\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1', 'scoring_dataset_id': '2', 'command': 'predict', 'max_reps': 0, 'samplepct':100})\n dp2 = DataProcessor(request)\n data = dp2.request_datasets()\n self.assertEqual(data.keys(), ['1'])\n self.assertEqual(data['1'].keys(), ['scoring', 'vartypes'])\n scoring_data = data['1']['scoring']\n vartypes = data['1']['vartypes']\n self.assertEqual(list(scoring_data.columns), [\"age\"])\n self.assertEqual(vartypes, \"N\")", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def test_run(id_, args, kwargs, expected):\n # Remove results for test.\n results_path = expected['results_path']\n shutil.rmtree(results_path, ignore_errors=True)\n # Start code.\n # [future] attempts to run classification with n_jobs>1\n # global cnfg_default\n # sys.modules['cnfg_default'] = get_params(args[0], 'cnfg_default')\n # import cnfg_default\n # #from cnfg_default import custom_score_metric\n objects = pycnfg.run(oid='default', *args, **kwargs)\n tmp = {k: type(v).__name__ for k, v in objects.items()}\n print('OBJECTS:')\n print(tmp)\n # Compare results:\n # * Compare objects (keys and str of values).\n objects_ = expected['objects']\n objects = {k: type(v).__name__ for k, v in objects.items()}\n if objects != objects_:\n print(set(objects.items()) ^ set(objects_.items()))\n assert objects == objects_\n # for k, v in objects.items():\n # assert k in objects_\n # assert type(v).__name__ == objects_[k]\n # * Compare predictions csv(all available).\n pred_path = glob.glob(f\"{results_path}/models/*_pred.csv\")\n pred_path_ = 
glob.glob(expected['pred_path'])\n assert len(pred_path) == len(pred_path_)\n for act, exp in zip(sorted(pred_path), sorted(pred_path_)):\n file_diff(act, exp)\n assert filecmp.cmp(act, exp)\n # * Compare test logs.\n logs_path = glob.glob(f\"{results_path}/logs*/*_test.log\")[0]\n logs_path_ = expected['logs_path']\n file_diff(logs_path, logs_path_)\n assert filecmp.cmp(logs_path, logs_path_)\n # * Compare runs dataframe, non-universe columns.\n runs_path = f\"{results_path}/runs\"\n runs_path_ = expected['runs_path']\n df = runs_loader(runs_path)\n df_ = runs_loader(runs_path_)\n # First False/True for each element, then check all by columns.\n # col1 True\n # col2 False\n # dtype: bool\n df_diff = df.eq(df_).all()\n # Column names that are not equal.\n columns = sorted(list(df_diff[df_diff==False].dropna().index))\n # columns_eq = sorted(list(df_diff[df_diff==True].dropna().index))\n columns_ = expected['columns_diff']\n print('DIFF:\\n', columns)\n time.sleep(1)\n # assert columns == columns_\n # * Compare model.\n model_path = glob.glob(f\"{results_path}/models/*.model\")\n model_path_ = glob.glob(expected['model_path'])\n assert len(model_path) == len(model_path_)\n for act, exp in zip(sorted(model_path), sorted(model_path_)):\n assert filecmp.cmp(act, exp)\n return", "def check(self, test_modules=__all__):\n\n # if test suite is being running from within forcebalance module, append the forcebalance prefix\n if __name__==\"forcebalance.test.__init__\":\n test_modules = [\"forcebalance.test.\" + test_module for test_module in test_modules]\n\n for test_module in test_modules:\n __import__(test_module)", "def test_basic_execution(self):", "def test_get_run(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_run_multi_r__(self):\n\n # Test Description\n # ================\n #\n # 1. This test intialises an example *eopy.matchup.matchupIO.MatchUp* object\n #\n # 2. Compare transformed dataset to expected value\n\n ################################################################################################################\n # 1. Initialise Test Data Object\n ################################################################################################################\n\n MatchUpTest = return_MatchUpTest_r__()\n\n ################################################################################################################\n # 2. 
Define expected values\n ################################################################################################################\n\n # Original dataset values (should be unchanged)\n MatchUpOriginal_expected = return_MatchUpTest_r__()\n\n # Transformed dataset\n values_expected = array([294.0625, 480.3733333, 300.6, 227.3846154, 210.1533333,\n 22.74193548, 22.0625, 21.96875, 22.80645161, 23.5,\n 21.66666667, 21.05882353, 23, 22.40625,\n 38.33333333, 36.63636364, 36.5, 38.42857143,\n 30.1, 32.14893617, 29.37254902, 28.88461538, 28.56603774,\n 33.45238095, 32.81395349, 31.77272727, 32.60465116,\n 40.125, 43.54054054, 38.59090909, 34.08510638,\n 13.72727273, 12, 14.1, 11.79069767, 17.53846154,\n 12.69565217, 31.16666667, 12.26086957, 11.52272727,\n 8.8125, 12, 7.4, 10.13207547])\n unc_expected = [Uncertainty(1, array([1.6, 1.5, 1.5, 1.3, 1.5])),\n Uncertainty(1, array([3.1, 3.2, 3.2, 3.1, 3.0])),\n Uncertainty(1, array([3.3, 3.4, 3.1, 3.2])),\n Uncertainty(1, array([2.1, 2.2, 2.2, 2.1])),\n Uncertainty(1, array([5.0, 4.7, 5.1, 5.2, 5.3])),\n Uncertainty(1, array([4.2, 4.3, 4.4, 4.3])),\n Uncertainty(1, array([4.0, 3.7, 4.4, 4.7])),\n Uncertainty(1, array([2.2, 1.7, 2.0, 4.3, 2.6])),\n Uncertainty(1, array([2.3, 1.2, 2.3, 4.4])),\n Uncertainty(1, array([3.2, 2.7, 3.0, 5.3]))]\n ks_expected = array([4.8, 6.8, 5.2, 5.6, 5.2, 12.10287443, 13.99394856, 12.48108926, 12.85930408])\n unck_expected = [Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25])),\n Uncertainty(1, array([0.2644, 0.2644, 0.2644, 0.2644]))]\n idx_expected = {\"Nm\": [5, 4],\n \"cNm\": [0, 5, 9],\n \"Im\": [[0, 1], [1, 2]],\n \"sensors\": [-1, 1, 2],\n \"sensor_ms\": [1, 3, 3],\n \"n_sensor\": [0, 1, 1, 2, 1, 1, 2, 1, 1, 2],\n \"n_mu\": [1, 1, 2, 2, 1, 2, 2, 1, 2, 2],\n \"n_cov\": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"N_var\": [5, 5, 4, 4, 5, 4, 4, 5, 4, 4],\n \"idx\": [0, 5, 10, 14, 18, 23, 27, 31, 36, 40, 44],\n \"Ia\": [1, 1, 1, 2, 2, 2]}\n a_expected = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])\n w_matrices_expected = []\n u_matrices_expected = []\n\n ################################################################################################################\n # 3. Run Transform2NormInd.run()\n ################################################################################################################\n\n Transform2NormIndOp = Transform2NormInd()\n MatchUpTransform = Transform2NormIndOp.run(MatchUpTest)\n\n values_test = MatchUpTransform.values\n unc_test = MatchUpTransform.unc\n w_matrices_test = MatchUpTransform.w_matrices\n u_matrices_test = MatchUpTransform.u_matrices\n ks_test = MatchUpTransform.ks\n unck_test = MatchUpTransform.unck\n idx_test = MatchUpTransform.idx\n\n ################################################################################################################\n # 4. Compare retrieve values to expect values\n ################################################################################################################\n\n # Test transformed data object attribute by attribute\n\n # a. values\n for i, (value_expected, value_test) in enumerate(zip(values_expected, values_test)):\n self.assertAlmostEqual(value_expected, value_test, places=5, msg=str(i))\n\n # b. unc\n for block_unc_test, block_unc_expected in zip(unc_test, unc_expected):\n self.assertEqual(block_unc_expected.typeID, block_unc_test.typeID)\n self.assertEqual(block_unc_expected.uR.tolist(), block_unc_test.uR.tolist())\n\n # c. w_matrices\n self.assertEqual(w_matrices_test, w_matrices_expected)\n\n # d. 
u_matrices\n self.assertEqual(u_matrices_test, u_matrices_expected)\n\n # e. ks\n for k_expected, k_test in zip(ks_expected, ks_test):\n self.assertAlmostEqual(k_test.tolist(), k_expected.tolist(), places=5)\n\n # f. unck\n for block_unck_test, block_unck_expected in zip(unck_test, unck_expected):\n self.assertEqual(block_unck_expected.typeID, block_unck_test.typeID)\n self.assertEqual(block_unck_expected.uR.tolist(), block_unck_test.uR.tolist())\n\n # h. idx\n self.assertEqual(set(idx_expected.keys()), set(idx_test.keys()))\n for key in idx_expected.keys():\n idx_i_test = idx_test[key]\n idx_i_expected = idx_expected[key]\n if isinstance(idx_i_expected, ndarray):\n self.assertEqual(idx_i_test.tolist(), idx_i_expected.tolist())\n else:\n self.assertEqual(idx_i_test, idx_i_expected)\n\n # Test original data object preserved attribute by attribute\n\n # a. values\n for i, (value_original_expected, value_original_test) in enumerate(zip(MatchUpOriginal_expected.values, MatchUpTest.values)):\n self.assertAlmostEqual(value_original_expected, value_original_test, places=5)\n\n # b. unc\n for block_unc_original_expected, block_unc_original_test in zip(MatchUpOriginal_expected.unc, MatchUpTest.unc):\n self.assertEqual(block_unc_original_expected.typeID, block_unc_original_test.typeID)\n self.assertEqual(block_unc_original_expected.uR.tolist(), block_unc_original_test.uR.tolist())\n\n # c. w_matrices\n self.assertEqual(MatchUpOriginal_expected.w_matrices, MatchUpTest.w_matrices)\n\n # d. u_matrices\n self.assertEqual(MatchUpOriginal_expected.u_matrices, MatchUpTest.u_matrices)\n\n # e. ks\n for k_original_expected, k_original_test in zip(MatchUpOriginal_expected.ks, MatchUpTest.ks):\n self.assertAlmostEqual(k_original_test.tolist(), k_original_expected.tolist(), places=5)\n\n # f. unck\n for block_unck_original_expected, block_unck_original_test in zip(MatchUpOriginal_expected.unck, MatchUpTest.unck):\n self.assertEqual(block_unck_original_expected.typeID, block_unck_original_test.typeID)\n self.assertEqual(block_unck_original_expected.uR.tolist(), block_unck_original_test.uR.tolist())\n\n # h. 
idx\n self.assertEqual(set(MatchUpOriginal_expected.idx), set(MatchUpTest.idx))\n for key in MatchUpOriginal_expected.idx.keys():\n idx_i_original_test = MatchUpTest.idx[key]\n idx_i_original_expected = MatchUpOriginal_expected.idx[key]\n if isinstance(idx_i_original_expected, ndarray):\n self.assertEqual(idx_i_original_test.tolist(), idx_i_original_expected.tolist())\n else:\n self.assertEqual(idx_i_original_test, idx_i_original_expected)", "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def getTestResults():", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")", "def run(self, test, env):\n\n raise NotImplementedError", "def test_processor_tests(data):\n actual = data[0]\n expected = data[1]\n assert(expected == actual)", "def c_test_run_inp(self, temp_params, base_locals):\r\n return 1", "def c_test_run_inp(self, temp_params, base_locals):\r\n return 1", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_all(self):\n # verify / source / run\n src = self.tmp()\n open(src, 'w').close()\n bscript = BaseScript(src)\n BaseScript.verify(src)\n self.assertEqual(bscript.source(), '')\n self.assertRaises(NotImplementedError, bscript.run, 'foobar')", "def test_predictor():", "def run_tests(file, samples):\n # Get the script dir, name and check if the file given exists\n test_dir = os.path.dirname(os.path.realpath(__file__))\n script_name = os.path.basename(__file__)\n if not os.path.isfile(os.path.join(test_dir, file)):\n sys.stderr.write('{0}: file \\'{1}\\' not found\\n'.format(script_name, file))\n sys.exit(0)\n\n result_dir = os.path.join(test_dir, 'results')\n if not os.path.exists(result_dir):\n os.mkdir(result_dir)\n\n # Get a path to the build dir to run iengine and cd into it\n filepath = os.path.join(test_dir, file)\n exe_path = os.path.join(os.path.join(test_dir, '..'), 'cmake-build-debug')\n os.chdir(exe_path)\n\n # Open csv files for writing to\n time_dist = open(os.path.join(result_dir, 'time.csv'), 'a')\n inference_dist = open(os.path.join(result_dir, 'inference.csv'), 'a')\n time_writer = csv.DictWriter(time_dist, delimiter=',', fieldnames=['method',\n 'file',\n 'sample',\n 'time'])\n inference_writer = csv.DictWriter(inference_dist, delimiter=',',\n fieldnames=['method', 'file',\n 'sample', 'inference_length'])\n time_writer.writeheader()\n inference_writer.writeheader()\n\n # Run through tests for all inference methods\n for method in ['FC', 'BC', 'TT']:\n timer = timeit.Timer(functools.partial(execute, filepath, method))\n avg = 0\n avg_path = 0\n\n # Run through all samples for the current inference method getting the execution\n # time and the number of inferences/models considered in the process\n for i in range(0, samples):\n print(timer.timeit(1))\n current, (result, err) = timer.timeit(1)\n avg += current * 1000\n result = result.decode('ascii').replace(',', '').replace(':', '')\n result_list = str.split(result)[1:]\n length = len(result_list)\n if method == 
'TT':\n length = int(result_list[0])\n avg_path += length\n time_writer.writerow({'method': method, 'file': file, 'sample': i,\n 'time': current})\n inference_writer.writerow({'method': method, 'file': file, 'sample': i,\n 'inference_length': length})\n\n terminology = 'inferences'\n if method == 'TT':\n terminology = 'models'\n\n print('Method: {0}, Average time: {1:.3f}ms, Average {2}: {3}'.format(method,\n avg / samples, terminology, avg_path / samples))\n\n time_dist.close()\n inference_dist.close()", "def unitary_test():", "def startTestRun(self):", "def test():\n return _make_modules(is_train=False)", "def runTest(self):\n self.setUp()\n self.test_FiducialTransform1()", "def runTest(self):\n try:\n print('TestCase runTest===>')\n self.test_runner.run_test(self.testcase_dict)\n\n finally:\n self.meta_data = getattr(self.test_runner.http_client_session, \"meta_data\", {})", "def testing_phase(self):\r\n self.test_dataloader = self.get_dataloader(self.test_h5_path, \"test set\")\r\n self.get_ts_properties()\r\n\r\n self.restart_epoch = util.get_restart_epoch()\r\n print(f\"* Loading model from previous saved state (Epoch {self.restart_epoch}).\", flush=True)\r\n self.model = torch.load(\r\n self.C.job_dir + f\"model_restart_{self.restart_epoch}.pth\"\r\n )\r\n\r\n self.model.eval()\r\n with torch.no_grad():\r\n self.generate_graphs(n_samples=self.C.n_samples)\r\n\r\n print(\"* Evaluating model.\", flush=True)\r\n anal.evaluate_model(valid_dataloader=self.test_dataloader,\r\n train_dataloader=self.train_dataloader,\r\n nll_per_action=self.nll_per_action,\r\n model=self.model)\r\n\r\n self.print_time_elapsed()", "def test_T01():", "def _test_examples(self):\n checks = [\n (\n \"ex5_line-of-sight_solution\",\n [r\"RAJA sequential\", r\"RAJA OpenMP\", r\"result -- PASS\"],\n ),\n (\n \"ex6_stencil-offset-layout_solution\",\n [r\"RAJA Views \\(permuted\\)\", r\"result -- PASS\"],\n ),\n (\n \"ex8_tiled-matrix-transpose_solution\",\n [r\"parallel top inner loop\", r\"collapsed inner loops\", r\"result -- PASS\"],\n ),\n (\"kernel-dynamic-tile\", [r\"Running index\", r\"(24,24)\"]),\n (\"plugin-example\", [r\"Launching host kernel for the 10 time\"]),\n (\"tut_batched-matrix-multiply\", [r\"result -- PASS\"]),\n (\"wave-eqn\", [r\"Max Error = 2\", r\"Evolved solution to time\"]),\n ]\n for exe, expected in checks:\n reason = \"test: checking output of {0} for {1}\".format(exe, expected)\n self.run_test(\n exe,\n [],\n expected,\n installed=False,\n purpose=reason,\n skip_missing=True,\n work_dir=self._extra_tests_path,\n )", "def main():\n test_merge_quick_sort()\n test_compare()", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. 
You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def check():\n \n overall_report = dict()\n\n # source code analysis\n # ====================\n # currently empty\n \n # compile\n # =======\n ret_makefile = subprocess.run([config.compiler] + config.compiler_args, # command\n stdout=subprocess.PIPE, # capture stdout\n stderr=subprocess.PIPE, # capture stderr\n universal_newlines=True) # use text mode for std* file objects\n overall_report['makefile'] = ret_makefile\n \n # runtime analysis\n # ================\n with open('compile.txt', 'r') as f:\n if 'error' not in f.read().lower(): # if compilation succeeded\n overall_report, test_case_report_list = runtime_analysis(config, overall_report)\n \n # pass this info to next tools for subsequent processing\n # ======================================================\n pp(overall_report)\n # results from runtime analysis\n if 'runtime_analysis_done' in overall_report:\n success_count = 0\n for report in test_case_report_list:\n if 'timeout' in report:\n util.addFinding(\"Time limit exceeded!\", 0, \"\", \"TEST_080006\")\n elif report['return_code'] != 0:\n if report['stderr_stream'] != '': # ASan/LeakSan/Stack protector probably reported something\n pass # but these findings will be added by analyze.py\n else:\n util.addFinding(\"It seems your program might have crashed.\", 0,\"\",\"TEST_100006\")\n # output_match == None means the user might have tried to print to outfile\n elif report['stdout_stream'] != '' or report['output_match'] is None:\n util.addFinding(\"A test case failed! 
Make sure you are not trying to print something.\",\n 0,\"\",\"TEST_100006\")\n elif not all(report['output_match']): # not all test cases passed\n util.addFinding(\"A test case failed!\", 0, \"\", \"TEST_100006\")\n else:\n success_count += 1\n\n with open('stderr.txt', 'a') as f:\n f.write(report['stderr_stream'])\n with open('stdout.txt', 'a') as f:\n f.write(report['outfile'])\n\n if success_count == len(test_case_report_list):\n util.addFinding(\"Program behaves as expected!\", 1, \"CHALLENGE_PASS\", \"TEST_900006\")\n \n util.dumpFindings()\n \n # next tools\n subprocess.run([\"./analyse.py\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n subprocess.run([\"./ai.py\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "def test_customize_test_loads(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n response = self.app.test_client().get('/test/3')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')\n regression_tests = RegressionTest.query.all()\n self.assertIn(regression_tests[1].command, str(response.data))\n self.assertNotIn(regression_tests[0].command, str(response.data))", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(): \n \n\n nextdata = [[21, 61, 42, 30], [33,45, 18, 29]]\n\n for xval, yval, snum, expect in nextdata:\n\n pmachine = PMachine()\n pmachine.serial_number = snum\n pmachine.run2_completion()\n result = pmachine.calc_square_total(xval, yval, showsquare=True)\n assert result == expect\n print(\"Got value {}={} as expected\".format(result, expect))", "def test_py_compile_basic(self):\n self._test_py_compile('basic')", "def eval_n_verify(yaml_data, dry_run, fail_eval):\n print('='*10, 'Verifying Results', '='*10)\n try:\n for model in yaml_data['models']:\n for i, topic in enumerate(yaml_data['topics']):\n for eval in yaml_data['evals']:\n eval_cmd = [\n os.path.join(yaml_data['root'], eval['command']),\n ' '.join(eval['params']) if eval['params'] else '',\n os.path.join(yaml_data['root'], yaml_data['qrels_root'], topic['qrel']),\n 'run.{0}.{1}.{2}'.format(yaml_data['name'], model['name'], topic['path'])\n ]\n if dry_run:\n print(' '.join(eval_cmd))\n continue\n\n out = [line for line in check_output(' '.join(eval_cmd)).decode('utf-8').split('\\n') if line.strip()][-1]\n if not out.strip():\n continue\n eval_out = out.strip().split(eval['separator'])[eval['parse_index']]\n expected = round(model['results'][eval['metric']][i], eval['metric_precision'])\n real = round(float(eval_out), eval['metric_precision'])\n if isclose(expected, real):\n print(OKBLUE, '[OK]', yaml_data['name'], model['name'], topic['name'], eval['metric'], expected, real, ENDC)\n else:\n print(FAIL, ['ERROR'], yaml_data['name'], model['name'], topic['name'], eval['metric'], expected, real, '!!!!', ENDC)\n if fail_eval:\n assert False\n finally:\n print(ENDC)", "def test_no_reproducible_for_varinat_analysis(self):\n self.testcases[0].job_type = 'some_type1'\n self.testcases[0].project_name = 'project1'\n self.testcases[0].crash_state = 'abcde'\n self.testcases[0].one_time_crasher_flag = False\n self.testcases[0].crash_type = 'crash_type1'\n self.testcases[0].security_flag = True\n self.testcases[1].job_type = 'some_type2'\n self.testcases[1].project_name = 'project1'\n self.testcases[1].crash_state = 
'vwxyz'\n self.testcases[1].crash_type = 'crash_type2'\n self.testcases[1].one_time_crasher_flag = True\n self.testcases[1].security_flag = True\n\n for t in self.testcases:\n t.put()\n\n # testcase2's varinat will be evaluated against testcase1\n self.testcase_variants[0].job_type = 'fake_engine_asan_project1'\n self.testcase_variants[0].testcase_id = self.testcases[0].key.id()\n self.testcase_variants[0].security_flag = True\n self.testcase_variants[1].job_type = 'some_type1'\n self.testcase_variants[1].crash_state = 'abcde'\n self.testcase_variants[1].crash_type = 'crash_type1'\n self.testcase_variants[1].testcase_id = self.testcases[1].key.id()\n self.testcase_variants[1].security_flag = True\n\n for v in self.testcase_variants:\n v.put()\n\n grouper.group_testcases()\n\n for index, t in enumerate(self.testcases):\n self.testcases[index] = data_handler.get_testcase_by_id(t.key.id())\n self.assertEqual(self.testcases[index].group_id, 0)\n self.assertTrue(self.testcases[index].is_leader)", "def test(self):\n pass", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def _load_test_data(self):\n self._save_test_data()", "def test_e2e(self):\n\n for input_lines, expected_output_lines in zip(self.__class__.input_cases, self.__class__.expected_outputs): # extract params for sub_test.\n # Execute sub_test for each test cases.\n with self.subTest(input_lines=input_lines, expected_output_lines=expected_output_lines):\n\n # Emulate input_lines by mock\n with patch(target='builtins.input', new=self.get_input_mock(inputs=input_lines)):\n\n # Execute main logic with capturing standard output.\n with captured_stdout() as stdout:\n self.__class__.target_method() # If mock raises StopIteration here, it means that the input method has been called more than the number of supplied lines by Mock.\n actual_output = self.get_stdout_line(stdout=stdout, bottom=len(expected_output_lines))\n\n self.assertEqual(actual_output, expected_output_lines)", "def run_test(models, data):\n num_correct = 0.0\n num_total = 0.0\n\n # initialize lists to put in predictions & results for confusion matrix \n predicted_labels = []\n actual_labels = []\n langlist = models.keys()\n for ai, actual_lang in enumerate(langlist):\n test_files = open(os.path.join('traintestsplit', actual_lang+'.testlist')).read().split()\n print 'Testing', len(test_files), 'files from', actual_lang\n for filename in test_files:\n logprobs = {} # dict: total log prob of this file under each model \n for test_lang in langlist:\n logprobs[test_lang] = apply_model(models[test_lang], data[actual_lang][filename+'.npytxt'])\n predicted_lang = max(logprobs.items(), key=lambda x:x[1])[0]\n # insert prediction (of lang index) into predicted list \n predicted_labels.append(langlist.index(predicted_lang))\n actual_labels.append(ai)\n if actual_lang == predicted_lang:\n num_correct += 1\n num_total += 1\n\n print len(filter(lambda x:x==ai, predicted_labels[-len(test_files):])), 'correct'\n\n print\n print 
'Accuracy', num_correct*100/num_total\n\n #CONFUSION MATRIX (y_test, y_pred) -> (actual label, predictions) \n cm = confusion_matrix(actual_labels, predicted_labels)\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # display confusion stats by lang (TODO: visualize with matplotlib) \n print '*'*20\n for ai, actual_lang in enumerate(langlist):\n print actual_lang, 'confusion:'\n for pi, predicted_lang in enumerate(langlist):\n print '{0}: {1:.2f}%'.format(predicted_lang, cm_normalized[ai, pi]*100)\n print '*'*20", "def test_main_integration(self, monkeypatch, mock_model, input_data):\n # mock out the classifier loading and predictions (in a valid way)\n def predict_fn(x):\n predictions = np.tile([True, None, False], math.ceil(len(x)/3))\n return predictions[:len(x)]\n\n monkeypatch.setattr(\"joblib.load\", lambda x: mock_model(predict_fn))\n # Note for future devs -- `args=[]` is VERY important to pass when\n # testing argschema modules using VSCode and probably other IDEs.\n # If not, will exit during parsing with no good error\n # (it's ingesting the command line args passed to the test runner)\n parser = InferenceParser(input_data=input_data, args=[])\n main(parser)\n # Check outputs exist (contents are mock result)\n assert os.path.exists(parser.args[\"output_json\"])", "def test_T4():", "def test_T4():", "def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir-metadata',\n type=pathlib.Path, required=True)\n\n args = parser.parse_args()\n\n with LockedMetadata(args.dir_metadata, __file__) as md:\n summary_dict = {}\n passing_tests = []\n failing_tests = []\n for f in md.tests_pickle_files:\n try:\n trr = TestRunResult.construct_from_pickle(f)\n summary_dict[f\"{trr.testname}.{trr.seed}\"] = \\\n ('PASS' if trr.passed else\n 'FAILED' + (\" {T}\" if (trr.failure_mode == Failure_Modes.TIMEOUT) else \"\"))\n if trr.passed:\n passing_tests.append(trr)\n else:\n failing_tests.append(trr)\n except RuntimeError as e:\n failing_tests.append(\n TestRunResult(\n name='broken_test',\n failure_message=str(e)\n ))\n\n md.regr_log = md.dir_run/'regr.log'\n md.regr_log_junit = md.dir_run/'regr_junit.xml'\n md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'\n\n # Write results as junit_xml\n with open(md.regr_log_junit,\n 'w',\n encoding='UTF-8') as junit_xml,\\\n open(md.regr_log_junit_merged,\n 'w',\n encoding='UTF-8') as junit_merged_xml:\n output_run_results_junit_xml(passing_tests, failing_tests,\n junit_xml,\n junit_merged_xml)\n\n with open(md.regr_log, 'w', encoding='UTF-8') as outfile:\n # Write results as regr.log (custom logfile format)\n output_results_text(passing_tests, failing_tests, summary_dict,\n outfile)\n\n test_summary_dict = create_test_summary_dict(passing_tests +\n failing_tests)\n\n cov_summary_dict = {}\n if md.simulator == \"xlm\":\n cov_summary_dict = create_cov_summary_dict(md)\n else:\n print(\"Warning: Not generating coverage summary, unsupported \" \\\n f\"simulator {md.simulator}\")\n\n html_report_filename = md.dir_run/'report.html'\n with open(html_report_filename, 'w') as outfile:\n output_results_html(md, passing_tests + failing_tests,\n test_summary_dict, cov_summary_dict, outfile)\n\n json_report_filename = md.dir_run/'report.json'\n with open(json_report_filename, 'w') as json_report_file:\n output_results_dvsim_json(md, test_summary_dict, cov_summary_dict,\n json_report_file)\n\n svg_summary_filename = md.dir_run/'summary.svg'\n with open(svg_summary_filename, 'w') as svg_summary_file:\n 
output_results_svg(test_summary_dict, cov_summary_dict,\n svg_summary_file)\n\n # Print a summary line to the terminal\n print(gen_summary_line(passing_tests, failing_tests))\n\n # Succeed if no tests failed\n return 1 if failing_tests else 0", "def test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)", "def run_tests():\n os.environ['WORKDIR'] = CONFIG['workdir']\n os.environ['REPORTDIR'] = CONFIG['reportFolder']\n stdout = subprocess.DEVNULL\n if CONFIG['verbose']:\n stdout = None\n # cycle throught version\n total = 0\n valid = 0\n start = time.time()\n for version in utils.get_dirs(CONFIG['versionsFolder']):\n os.environ['VERSION'] = version\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version), CONFIG['workdir']\\\n , CONFIG['clearWorkdir'])\n # cycle throught use case\n for usecase in utils.get_dirs(CONFIG['testsFolder']):\n os.environ['TESTDIR'] = usecase\n if not CONFIG['quiet']:\n print('UseCase test: {}'.format(usecase))\n log_msg('info', 'UseCase test: {}'.format(usecase))\n try:\n folder = os.path.join(CONFIG['testsFolder'], usecase)\n with open(os.path.join(folder, CONFIG['useConfig'])) as usefp:\n jconfig = json.load(usefp)\n # clear workdir if desired\n if 'clearWorkdir' in jconfig.keys() and jconfig['clearWorkdir']:\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version)\\\n , CONFIG['workdir'], CONFIG['clearWorkdir'])\n # print('clearing')\n # raise\n cmd = ['py', os.path.join(folder, jconfig['entrypoint'])]\n total += 1\n if jconfig['runType'] == 'single':\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n else:\n for step in range(jconfig['numRuns']):\n if not CONFIG['quiet']:\n print('\\r >Step {}/{} '.format(step+1, jconfig['numRuns'])\\\n , end='', flush=True)\n log_msg('info', 'Step {}/{}'.format(step+1, jconfig['numRuns']))\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n if step+1 != jconfig['numRuns']:\n time.sleep(jconfig['interval'])\n except subprocess.CalledProcessError as excp:\n if not CONFIG['quiet']:\n print('Error msg:{}'\\\n .format(excp.stderr.decode().replace('\\r', '').replace('\\n', '|')))\n log_msg('error', excp.stderr.decode())\n else:\n valid += 1\n if not CONFIG['quiet']:\n print('{}.....Passed'.format(usecase))\n log_msg('info', '{} Passed'.format(usecase))\n\n elapse = time.time()-start\n log_msg('info', 'Ran {} tests in {:.3f}s with {} passed'.format(total, elapse, valid))\n print('-'*20)\n print('Ran {} tests in {:.3f}s with {} passed.'.format(total, elapse, valid))\n return total-valid", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def test_output(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.predict\"]:\n params = tuple(finput.values())\n\n self_1, eta_1, P_1, z_odo_1 = deepcopy(params)\n\n self_2, eta_2, P_2, z_odo_2 = deepcopy(params)\n\n etapred_1, P_1 = EKFSLAM.EKFSLAM.predict(self_1, eta_1, P_1, z_odo_1)\n\n etapred_2, P_2 = 
solution.EKFSLAM.EKFSLAM.predict(self_2, eta_2, P_2, z_odo_2)\n \n assert compare(etapred_1, etapred_2)\n assert compare(P_1, P_2)\n \n assert compare(self_1, self_2)\n assert compare(eta_1, eta_2)\n assert compare(P_1, P_2)\n assert compare(z_odo_1, z_odo_2)", "def __test_similarity(self):\n\n _, test_loader, _ = create_loaders()\n\n false_counter = 0\n for (image, labels) in test_loader:\n\n output_pytorch = self._model(image).detach().numpy()\n\n im = image.numpy().flatten()\n output_manual = self.run_through_model(im)\n\n if np.allclose(output_pytorch, output_manual, rtol=1e-4, atol=1e-4) is not True:\n false_counter += 1\n\n print(f\"Number of mistakes: {false_counter}\")", "def test_run(self):\n rig_analysis_dir = \"rig_analysis\"\n analysis_root = os.path.join(self.io_args.output_root, rig_analysis_dir)\n os.makedirs(analysis_root, exist_ok=True)\n\n self.io_args.output_obj = os.path.join(analysis_root, \"final.obj\")\n self.io_args.output_equirect = os.path.join(analysis_root, \"equirect.ppm\")\n self.io_args.output_camera = os.path.join(analysis_root, \"camera.ppm\")\n self.io_args.output_camera_id = \"0\"\n self.io_args.output_cross_section = os.path.join(analysis_root, \"cross.ppm\")\n\n self.run_app(\"RigAnalyzer\")\n self.check_against_truth(\n truth=os.path.join(self.io_args.truth_dir, rig_analysis_dir),\n output=analysis_root,\n )", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def check(self, runtime):", "def test(model, data_loader, num_train_batches, epoch, test_mloss, test_rloss, test_acc, directory):\r\n print('===> Evaluate mode')\r\n\r\n # Switch to evaluate mode\r\n model.eval()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n loss = 0\r\n margin_loss = 0\r\n recon_loss = 0\r\n\r\n correct = 0\r\n\r\n num_batches = len(data_loader)\r\n\r\n global_step = epoch * num_train_batches + num_train_batches\r\n\r\n start_time = timer()\r\n\r\n for data, target in data_loader:\r\n with torch.no_grad():\r\n batch_size = data.size(0)\r\n target_indices = target\r\n target_one_hot = utils.one_hot_encode(target_indices, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n target = target_one_hot\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n target_indices.to(args.device)\r\n\r\n # Output predictions\r\n output, reconstruction = model(data, target_indices, False) # output from DigitCaps (out_digit_caps)\r\n\r\n # Sum up batch loss\r\n t_loss, m_loss, r_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss += t_loss.data\r\n margin_loss += m_loss.data\r\n recon_loss += r_loss.data\r\n\r\n # Count number of correct predictions\r\n # v_magnitude shape: [128, 10, 1, 1]\r\n v_magnitude = torch.sqrt((output**2).sum(dim=2, keepdim=True))\r\n # pred shape: [128, 1, 1, 1]\r\n pred = v_magnitude.data.max(1, keepdim=True)[1].cpu()\r\n correct += pred.eq(target_indices.view_as(pred)).sum()\r\n\r\n\r\n # Get the reconstructed images of the last batch\r\n if args.use_reconstruction_loss:\r\n reconstruction = model.decoder(output, target_indices, False)\r\n # Input image size and number of channel.\r\n # By default, for MNIST, the image width and height is 28x28 and 1 channel for black/white.\r\n image_width = args.input_width\r\n image_height = args.input_height\r\n image_channel = 
args.num_conv_in_channels\r\n recon_img = reconstruction.view(-1, image_channel, image_width, image_height)\r\n assert recon_img.size() == torch.Size([batch_size, image_channel, image_width, image_height])\r\n\r\n # Save the image into file system\r\n utils.save_image(recon_img, directory / ('recons_image_test_{}_{}.png'.format(epoch, global_step)))\r\n utils.save_image(data, directory /\r\n ('original_image_test_{}_{}.png'.format(epoch, global_step)))\r\n\r\n end_time = timer()\r\n\r\n # Log test losses\r\n loss /= num_batches\r\n margin_loss /= num_batches\r\n recon_loss /= num_batches\r\n\r\n # Log test accuracies\r\n num_test_data = len(data_loader.dataset)\r\n accuracy = correct / num_test_data\r\n accuracy_percentage = float(correct) * 100.0 / float(num_test_data)\r\n\r\n test_mloss.write('%.6f \\n' % margin_loss)\r\n test_rloss.write('%.6f \\n' % recon_loss)\r\n test_acc.write('%.4f \\n' % accuracy_percentage)\r\n\r\n # Print test losses and accuracy\r\n print('Test: [Loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f}]'.format(\r\n loss,\r\n margin_loss,\r\n recon_loss if args.use_reconstruction_loss else 0))\r\n print('Test Accuracy: {}/{} ({:.2f}%)\\n'.format(\r\n correct, num_test_data, accuracy_percentage))\r\n\r\n\r\n global avg_testing_time_per_epoch\r\n avg_testing_time_per_epoch = (\r\n avg_testing_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n global best_acc\r\n global best_acc_epoch\r\n if accuracy_percentage > best_acc:\r\n best_acc = accuracy_percentage\r\n best_acc_epoch = epoch\r\n test_loader = data_loader\r\n utils.dump(utils.make_full_checkpoint_obj(locals(), globals()), directory / 'trained_model/FP32_model')", "def test_oldtestcases(self):\n\t\treturn oldtests()", "def run_tests(self):\n total_tests = len(self.tests)\n this_test_passed = True\n\n with self.board as board:\n board.repl.session = b\"\"\n\n for test in self.tests:\n # we likely had a REPL reset, so make sure we're\n # past the \"press any key\" prompt.\n board.repl.execute(b\"\\x01\", wait_for_response=True)\n\n this_test_passed = True\n\n self.log.write(f\"Starting test: {test.test_file}\")\n\n test_file_path = os.path.join(test.test_dir, test.test_file)\n test_cmds = []\n\n with open(test_file_path, 'r') as current_test:\n test_cmds = current_test.readlines()\n\n for line_no, line in enumerate(test_cmds, start=1):\n if line == \"\\n\":\n continue\n\n self.log.write(\n \"running line: ({0}) {1}\".format(line_no,\n line.rstrip('\\n'))\n )\n\n try:\n if line_no in test.interactions:\n action = test.interactions[line_no][\"action\"]\n value = test.interactions[line_no][\"value\"]\n #print(f\"ACTION: {action}; VALUE: {value}\")\n if action == \"output\":\n self.log.write(\n f\"- Testing for output of: {value}\"\n )\n\n try:\n result = exec_line(board, line)\n except Exception as exc:\n raise pyboard.CPboardError(exc) from Exception\n\n result = str(result,\n encoding=\"utf-8\").rstrip(\"\\r\\n\")\n if result != value:\n this_test_passed = False\n\n self.log.write(\" - Passed!\")\n\n elif action == \"input\":\n self.log.write(f\"- Sending input: {value}\")\n\n try:\n exec_line(board, line, echo=False)\n exec_line(board, value, input=True)\n except Exception as exc:\n raise pyboard.CPboardError(exc) from Exception\n\n elif action == \"verify\":\n self.log.write(f\"- Verifying with: {value}\")\n\n try:\n # import the referenced module\n module_name, func_name = value.split(\".\")\n imprt_stmt = [\".verifiers.\", module_name]\n verifier = 
importlib.import_module(\n \"\".join(imprt_stmt),\n package=\"rosiepi.rosie\"\n )\n\n # now get the function object using inspect\n # so that we can dynamically run it.\n ver_func = [\n func[1] for func in\n inspect.getmembers(verifier)\n if func[0] == func_name\n ][0]\n #self.log.write(ver_func)\n\n exec_line(board, line)\n result = ver_func(board)\n if not result:\n raise pyboard.CPboardError(\n f\"'{value}' test failed.\"\n )\n except Exception as exc:\n raise pyboard.CPboardError(exc) from Exception\n\n self.log.write(\" - Passed!\")\n\n else:\n board.repl.execute(line)\n\n except pyboard.CPboardError as line_err:\n this_test_passed = False\n err_args = [str(arg) for arg in line_err.args]\n err_msg = [\n \"Test Failed!\",\n \" - Last code executed: '{}'\".format(line.strip('\\n')),\n f\" - Line: {line_no}\",\n f\" - Exception: {''.join(err_args)}\",\n ]\n self.log.write(\"\\n\".join(err_msg))\n break\n\n if this_test_passed != True:\n break\n\n test.test_result = this_test_passed\n self.tests_run += 1\n test.repl_session = board.repl.session\n #print(board.repl.session)\n self.log.write(\"-\"*60)\n board.repl.reset()\n\n for test in self.tests:\n if test.test_result == None:\n continue\n elif test.test_result == True:\n self.tests_passed += 1\n elif test.test_result == False:\n self.tests_failed += 1\n\n end_msg = [\n f\"Ran {self.tests_run} of {total_tests} tests.\",\n f\"Passed: {self.tests_passed}\",\n f\"Failed: {self.tests_failed}\",\n ]\n self.log.write(\"\\n\".join(end_msg))", "def check_all(self):\n # TODO: this should use functions in execute.py to run tests in-sequence so that variable\n # name collisions are accounted for\n self._log_event(EventType.BEGIN_CHECK_ALL)\n\n # TODO: this is a janky way of resolving where the tests are. Formalize a method of \n # determining this and put it into a method in e.g. utils.py\n tests = [os.path.split(file)[1][:-3] for file in glob(os.path.join(self._path, \"*.py\")) \\\n if \"__init__.py\" not in file]\n if len(tests) == 0:\n nb_path = self._resolve_nb_path(None)\n with open(nb_path, encoding=\"utf-8\") as f:\n nb = json.load(f)\n tests = list(nb[\"metadata\"][NOTEBOOK_METADATA_KEY][\"tests\"].keys())\n\n global_env = inspect.currentframe().f_back.f_back.f_globals\n results = []\n if not _SHELVE:\n for test_name in sorted(tests):\n results.append(self.check(test_name, global_env))\n else:\n log = Log.from_file(_OTTER_LOG_FILENAME, ascending=False)\n for file in sorted(tests):\n if \"__init__.py\" not in file:\n test_name = os.path.splitext(os.path.split(file)[1])[0]\n\n entry = log.get_question_entry(test_name)\n env = entry.unshelve()\n global_env.update(env)\n del locals()[\"env\"]\n\n result = self.check(test_name, global_env)\n results.append((test_name, result))\n\n return GradingResults(results)", "def run_test_procedures(self):\r\n print('\\nDeploying prediction model...\\n')\r\n self.final_df_output = pd.read_json(path.data_final)\r\n self.final_df_output.sort_index(inplace=True)\r\n self.fill_testing_dates()\r\n self.perform_backtests()\r\n self.create_full_predictions_dataframe()\r\n print('\\nDeployment complete!')", "def _lv_test(self):\n raise NotImplementedError('Levene Test is not implemented')", "def _test1():\n sys.argv.append('--Numeric')\n from . 
import numpytools as N\n verify(N)\n sys.argv[-1] = '--numarray'\n reload(N)\n verify(N)\n sys.argv[-1] = '--numpy'\n reload(N)\n verify(N)", "def test_RawRun_process():\n for style in test_runs:\n test_runs[style].process()\n # now compare all images with ref\n ref = sorted(glob.glob('tests/data/processed_ref/*/*/*'))\n outputs = sorted(glob.glob('tests/data/processed/*/*/*'))\n for ref, out in zip(ref, outputs):\n print ref\n print out\n assert_image_equal(ref, out)", "def test_generate_nb_testing(self):\n pass", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def main():\n run_test_all()", "def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def __main() :\n launchTests()", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def testBuildProcessedData(self) -> None:\n preprocessedData = [\"jglskajgCaptgakljd\", \"MissMissMissMiss\", \"Don akgjdflagjkl\", \"aafdfdDr\"]\n self._nameClassifierBuilder._preprocessedData = preprocessedData\n self._nameClassifierBuilder._buildProcessedData()\n solution = [\n [1.0,0.0,0.0,0.0,0.0,0.0],\n [0.0,1.0,0.0,0.0,0.0,0.0],\n [1.0,0.0,0.0,0.0,0.0,0.0],\n [0.0,0.0,0.0,1.0,0.0,0.0]\n ]\n self.assertEquals(solution, self._nameClassifierBuilder._processedData)" ]
[ "0.6638031", "0.6631693", "0.64999235", "0.6455514", "0.6418419", "0.6384221", "0.63442385", "0.632622", "0.62952894", "0.6268668", "0.6258088", "0.6243169", "0.6238762", "0.62081796", "0.61851895", "0.6180172", "0.611677", "0.6106589", "0.6078657", "0.60766435", "0.6063972", "0.6063972", "0.60331696", "0.6021186", "0.6009289", "0.600271", "0.5995771", "0.59926826", "0.5975383", "0.5975383", "0.5975383", "0.59726256", "0.59668154", "0.59501404", "0.5940479", "0.5937566", "0.5906369", "0.58814526", "0.58756435", "0.5855597", "0.5855597", "0.5853584", "0.5853584", "0.5853584", "0.5853584", "0.5853584", "0.585327", "0.58478135", "0.5841588", "0.5836285", "0.5835798", "0.583143", "0.5827473", "0.58222413", "0.58165514", "0.5804475", "0.5800076", "0.5791011", "0.5789675", "0.57856476", "0.5784651", "0.57828754", "0.57781535", "0.57780325", "0.57665324", "0.5765756", "0.57654965", "0.5760921", "0.57586676", "0.5758218", "0.5756074", "0.57493174", "0.5744149", "0.57320815", "0.5730558", "0.5730558", "0.57264566", "0.57254434", "0.5723393", "0.5723124", "0.5722294", "0.5715924", "0.5715671", "0.5712502", "0.57108617", "0.5704625", "0.5698386", "0.569676", "0.56930375", "0.5691818", "0.5682345", "0.5676791", "0.56731194", "0.56721926", "0.5669731", "0.56657994", "0.5665678", "0.566406", "0.5654911", "0.56539446", "0.56457525" ]
0.0
-1
This is a wrapper API to compile and run models as a test for AoT
def compile_and_run( models: Union[List[AOTTestModel], AOTTestModel], runner: AOTTestRunner, interface_api: str, use_unpacked_api: bool, debug_calculated_workspaces: bool = False, workspace_byte_alignment: int = 8, constant_byte_alignment: int = 8, enable_op_fusion: bool = True, data_linkage: AOTDataLinkage = None, use_runtime_executor: bool = True, target: Union[str, tvm.target.Target, List[tvm.target.Target]] = "c", target_opts: Dict = None, test_dir: str = None, verbose: bool = False, schedule_name: str = None, debug_last_error: bool = False, checker: Optional[Callable[[str], bool]] = None, ) -> bool: if target_opts: for key, val in target_opts.items(): target += f" {key}={val}" if isinstance(target, str): target = tvm.target.Target(target) compiled_test_mods = compile_models( models=models, interface_api=interface_api, use_unpacked_api=use_unpacked_api, workspace_byte_alignment=workspace_byte_alignment, constant_byte_alignment=constant_byte_alignment, enable_op_fusion=enable_op_fusion, pass_config=runner.pass_config, use_runtime_executor=use_runtime_executor, target=target, schedule_name=schedule_name, ) return run_and_check( models=compiled_test_mods, runner=runner, interface_api=interface_api, debug_calculated_workspaces=debug_calculated_workspaces, workspace_byte_alignment=workspace_byte_alignment, constant_byte_alignment=constant_byte_alignment, data_linkage=data_linkage, test_dir=test_dir, verbose=verbose, debug_last_error=debug_last_error, checker=checker, )
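A minimal usage sketch of the wrapper above. The AOTTestModel constructor and AOT_DEFAULT_RUNNER are assumed helpers from the same test utility module, and the Relay module plus input/output data are hypothetical; only compile_and_run's own parameters come from the signature shown here.

import numpy as np
import tvm
from tvm import relay

# Hypothetical single-operator module to compile and run through the AOT flow.
x = relay.var("x", shape=(1, 8), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))

# Reference inputs and expected outputs the AOT test harness compares against.
inputs = {"x": np.random.rand(1, 8).astype("float32")}
outputs = {"output": np.maximum(inputs["x"], 0.0)}  # relu reference result

# AOTTestModel / AOT_DEFAULT_RUNNER are assumptions, not shown in this snippet.
compile_and_run(
    models=AOTTestModel(module=mod, inputs=inputs, outputs=outputs),
    runner=AOT_DEFAULT_RUNNER,
    interface_api="c",
    use_unpacked_api=True,
)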
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_build_model(arguments):\n ...", "def run_tests():\n source1 = TextModel('prep')\n source1.add_file('source_model_1.txt')\n \n source2 = TextModel('athletes')\n source2.add_file('source_model_2.txt')\n\n new1 = TextModel('my_writing')\n new1.add_file('my_writing.txt')\n new1.classify(source1, source2)\n\n # Add code for three other new models below.", "def test_model():\n pass", "def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")", "def run_all_tests():\n model_configs = (model_handler.ModelConfig(\n saved_model_dir=platform_test.test_src_dir_path(\n \"python/compiler/tensorrt/model_tests/sample_model\"),\n default_batch_size=128),)\n if FLAGS.use_tf2:\n model_handler_cls = model_handler.ModelHandlerV2\n trt_model_handeler_cls = model_handler.TrtModelHandlerV2\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=True)\n else:\n model_handler_cls = model_handler.ModelHandlerV1\n trt_model_handeler_cls = model_handler.TrtModelHandlerV1\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=False)\n for model_config in model_configs:\n trt_convert_params = default_trt_convert_params._replace(\n max_batch_size=model_config.default_batch_size)\n base_model = model_handler_cls(model_config)\n random_inputs = base_model.generate_random_inputs()\n base_model_result = base_model.run(random_inputs)\n trt_fp32_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP32)).run(random_inputs)\n trt_fp16_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP16)).run(random_inputs)\n\n logging.info(\"Base model latency: %f ms\",\n _get_mean_latency(base_model_result))\n logging.info(\"TensorRT FP32 model latency: %f ms\",\n _get_mean_latency(trt_fp32_model_result))\n logging.info(\"TensorRT FP16 model latency: %f ms\",\n _get_mean_latency(trt_fp16_model_result))", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n 
example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def test_model_initialization():\n MyModel(\"model\", SkillContext())", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def run_model(config_file):\n config_file = os.path.join(os.getcwd(), config_file)\n result = Tethys(config_file=config_file)\n result.run_model()\n return result", "def test_pregenerated_model(sub_test, case):\n\n if case.startswith(\"sensi2\"):\n model_name = sub_test + \"_o2\"\n else:\n model_name = sub_test\n\n model_swig_folder = str(\n Path(__file__).parents[2]\n / \"build\"\n / \"tests\"\n / \"cpp\"\n / f\"external_{model_name}-prefix\"\n / \"src\"\n / f\"external_{model_name}-build\"\n / \"swig\"\n )\n\n test_model_module = amici.import_model_module(\n module_name=model_name, module_path=model_swig_folder\n )\n model = test_model_module.getModel()\n solver = model.getSolver()\n amici.readModelDataFromHDF5(\n options_file, model.get(), f\"/{sub_test}/{case}/options\"\n )\n amici.readSolverSettingsFromHDF5(\n options_file, solver.get(), f\"/{sub_test}/{case}/options\"\n )\n\n edata = None\n if \"data\" in expected_results[sub_test][case].keys():\n edata = amici.readSimulationExpData(\n str(expected_results_file), f\"/{sub_test}/{case}/data\", model.get()\n )\n rdata = amici.runAmiciSimulation(model, solver, edata)\n\n check_derivative_opts = dict()\n\n if model_name == \"model_nested_events\":\n check_derivative_opts[\"rtol\"] = 1e-2\n elif model_name == \"model_events\":\n check_derivative_opts[\"atol\"] = 1e-3\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n and not model_name.startswith(\"model_neuron\")\n and not case.endswith(\"byhandpreeq\")\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n verify_simulation_opts = dict()\n\n if model_name.startswith(\"model_neuron\"):\n verify_simulation_opts[\"atol\"] = 1e-5\n verify_simulation_opts[\"rtol\"] = 1e-2\n\n if model_name.startswith(\"model_robertson\") and case == \"sensiforwardSPBCG\":\n verify_simulation_opts[\"atol\"] = 1e-3\n verify_simulation_opts[\"rtol\"] = 1e-3\n\n verify_simulation_results(\n rdata, expected_results[sub_test][case][\"results\"], **verify_simulation_opts\n )\n\n if model_name == \"model_steadystate\" and case == \"sensiforwarderrorint\":\n edata = amici.amici.ExpData(model.get())\n\n # Test runAmiciSimulations: ensure running twice\n # with same ExpData yields same results\n if (\n edata\n and model_name != \"model_neuron_o2\"\n and not (model_name == \"model_robertson\" and case == \"sensiforwardSPBCG\")\n ):\n if isinstance(edata, amici.amici.ExpData):\n edatas = [edata, edata]\n else:\n edatas = [edata.get(), edata.get()]\n\n rdatas = amici.runAmiciSimulations(\n model, solver, edatas, num_threads=2, failfast=False\n )\n verify_simulation_results(\n rdatas[0],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n verify_simulation_results(\n rdatas[1],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n\n # test residuals mode\n if solver.getSensitivityMethod() == amici.SensitivityMethod.adjoint:\n with pytest.raises(RuntimeError):\n 
solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n else:\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"res\", \"sres\", \"y\", \"sy\", \"sigmay\", \"ssigmay\"],\n **verify_simulation_opts,\n )\n with pytest.raises(RuntimeError):\n solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n\n chi2_ref = rdata.chi2\n\n # test likelihood mode\n solver.setReturnDataReportingMode(amici.RDataReporting.likelihood)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"llh\", \"sllh\", \"s2llh\", \"FIM\"],\n **verify_simulation_opts,\n )\n\n # test sigma residuals\n\n if (\n model_name == \"model_jakstat_adjoint\"\n and solver.getSensitivityMethod() != amici.SensitivityMethod.adjoint\n ):\n model.setAddSigmaResiduals(True)\n solver.setReturnDataReportingMode(amici.RDataReporting.full)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether activation changes chi2\n assert chi2_ref != rdata.chi2\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n chi2_ref = rdata.chi2\n res_ref = rdata.res\n\n model.setMinimumSigmaResiduals(100)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether changing the minimum changes res but not chi2\n assert np.isclose(chi2_ref, rdata.chi2)\n assert not np.allclose(res_ref, rdata.res)\n\n model.setMinimumSigmaResiduals(-10)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether having a bad minimum results in nan chi2\n assert np.isnan(rdata.chi2)\n\n with pytest.raises(RuntimeError):\n model.getParameterByName(\"thisParameterDoesNotExist\")", "def run(self, test, env):\n\n raise NotImplementedError", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def test_model_found(arguments):\n ...", "def run_tests():\n source1 = TextModel(\"Barack Obama\")\n source1.add_file('project/source_texts/barackobama_source_text.txt')\n\n source2 = TextModel('Donald Trump')\n source2.add_file('project/source_texts/donaldtrump_source_text.txt')\n\n new1 = TextModel('More Obama')\n new1.add_file('project/source_texts/moreobama_source_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('More Trump')\n new2.add_file('project/source_texts/moretrump_source_text.txt')\n new2.classify(source1, source2)\n\n new1 = TextModel('Gucci Gang by Lil Pump')\n new1.add_file('project/source_texts/guccigang_source_text.txt')\n new1.classify(source1, source2)\n\n new1 = TextModel(\"Spongebob Transcripts\")\n new1.add_file('project/source_texts/spongebobeps_source_text.txt')\n new1.classify(source1, source2)", "def 
compile_models(\n models: Union[List[AOTTestModel], AOTTestModel],\n interface_api: str,\n use_unpacked_api: bool,\n workspace_byte_alignment: int = 8,\n constant_byte_alignment: int = 8,\n enable_op_fusion: bool = True,\n pass_config: Dict[str, Any] = None,\n use_runtime_executor: bool = True,\n target: tvm.target.Target = tvm.target.Target(\"c\"),\n workspace_memory_pools=None,\n constant_memory_pools=None,\n schedule_name: str = None,\n) -> List[AOTCompiledTestModel]:\n if not isinstance(models, list):\n models = [models]\n\n runtime = Runtime(\"crt\")\n executor = Executor(\n \"aot\",\n {\n \"workspace-byte-alignment\": workspace_byte_alignment,\n \"constant-byte-alignment\": constant_byte_alignment,\n \"interface-api\": interface_api,\n \"unpacked-api\": use_unpacked_api,\n },\n )\n\n config = {\"tir.disable_vectorize\": True}\n if pass_config:\n config = {**config, **pass_config}\n if not enable_op_fusion:\n config[\"relay.FuseOps.max_depth\"] = 1\n\n compiled_mods = list()\n for model in models:\n with contextlib.ExitStack() as context_stack:\n if schedule_name:\n # Testing with deterministic schedule\n task_list = autotvm.task.extract_from_program(\n model.module, target=target, params=model.params\n )\n context_stack.enter_context(\n tvm.autotvm.apply_fixed_config(task_list, schedule_name)\n )\n\n context_stack.enter_context(tvm.transform.PassContext(opt_level=3, config=config))\n\n build_kwargs = dict(\n ir_mod=model.module,\n params=model.params,\n mod_name=model.name,\n )\n\n # TODO(Mousius) - Remove once executor/runtime are fully removed from Target\n if use_runtime_executor:\n build_kwargs.update(\n dict(\n target=target,\n executor=executor,\n runtime=runtime,\n workspace_memory_pools=workspace_memory_pools,\n constant_memory_pools=constant_memory_pools,\n )\n )\n else:\n build_kwargs.update(dict(target=tvm.target.Target(target, host=target)))\n\n executor_factory = tvm.relay.build(**build_kwargs)\n compiled_mods.append(\n AOTCompiledTestModel(model=model, executor_factory=executor_factory)\n )\n return compiled_mods", "def run_and_check(\n models: List[AOTCompiledTestModel],\n runner: AOTTestRunner,\n interface_api: str,\n debug_calculated_workspaces=False,\n workspace_byte_alignment=8,\n constant_byte_alignment=8,\n data_linkage: AOTDataLinkage = None,\n test_dir: str = None,\n verbose: bool = False,\n use_workspace_io: bool = False,\n debug_last_error: bool = False,\n checker: Optional[Callable[[str], bool]] = None,\n):\n\n def run_and_check_body(base_path):\n cflags = (\n f\"-DTVM_RUNTIME_ALLOC_ALIGNMENT_BYTES={workspace_byte_alignment} \"\n f\" -DTVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES={constant_byte_alignment} \"\n )\n # The calculated workspaces will not account for stack allocator tags used for debugging\n if debug_calculated_workspaces:\n cflags += \"-DTVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK \"\n\n base_path = os.path.abspath(base_path)\n build_path = os.path.join(base_path, \"build\")\n os.makedirs(build_path, exist_ok=True)\n\n include_path = os.path.join(base_path, \"include\")\n os.mkdir(include_path)\n tvm.micro.copy_crt_config_header(\"crt\", include_path)\n\n workspace_bytes = 0\n for compiled_model in models:\n model = compiled_model.model\n tar_file = os.path.join(base_path, f\"{model.name}.tar\")\n export_model_library_format(compiled_model.executor_factory, tar_file)\n t = tarfile.open(tar_file)\n t.extractall(base_path)\n\n # Interface C APIs does not need compiler generated\n # workspace to generate the test application, because\n # workspace size is 
codegen'd as a macro to\n # tvmgen_<model_name>.h.\n if interface_api != \"c\":\n workspace_bytes += mlf_extract_workspace_size_bytes(tar_file)\n\n workspace_bytes += model.extra_memory_in_bytes\n for key in model.inputs:\n sanitized_tensor_name = re.sub(r\"\\W\", \"_\", key)\n _create_header_file(\n f'{_mangle_name(model.name, \"input_data\")}_{sanitized_tensor_name}',\n model.inputs[key],\n include_path,\n data_linkage,\n )\n\n for key in model.outputs:\n sanitized_tensor_name = re.sub(r\"\\W\", \"_\", key)\n _create_header_file(\n f'{_mangle_name(model.name, \"output_data\")}_{sanitized_tensor_name}',\n np.zeros(model.outputs[key].shape, model.outputs[key].dtype),\n include_path,\n data_linkage,\n )\n _create_header_file(\n f'{_mangle_name(model.name, \"expected_output_data\")}_{sanitized_tensor_name}',\n model.outputs[key],\n include_path,\n data_linkage,\n )\n\n use_usmp = runner.pass_config.get(\"tir.usmp.enable\", False)\n # We only need the stack allocator if USMP is not used\n use_stack_allocator = not use_usmp\n\n _create_main(\n \"test.c\",\n models,\n build_path,\n runner.includes,\n runner.prologue,\n runner.epilogue,\n data_linkage,\n interface_api,\n workspace_bytes,\n use_stack_allocator,\n use_workspace_io,\n debug_last_error,\n )\n\n if checker and (not checker(base_path)):\n return False\n\n # Verify that compiles fine\n file_dir = os.path.dirname(os.path.abspath(__file__))\n makefile_dir = os.path.join(file_dir, \"../../../tests/python/relay/aot\")\n codegen_path = os.path.join(base_path, \"codegen\")\n makefile = os.path.join(makefile_dir, f\"{runner.makefile}.mk\")\n fvp_dir = \"/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/\"\n # TODO(@grant-arm): Remove once ci_cpu docker image has been updated to FVP_Corstone_SSE\n if not os.path.isdir(fvp_dir):\n fvp_dir = \"/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4/\"\n custom_params = \" \".join(\n [f\" {param}='{value}'\" for param, value in runner.parameters.items()]\n )\n make_command = (\n f\"make -f {makefile} build_dir={build_path}\"\n + f\" CFLAGS='{cflags}'\"\n + f\" TVM_ROOT={file_dir}/../../..\"\n + f\" AOT_TEST_ROOT={makefile_dir}\"\n + f\" CODEGEN_ROOT={codegen_path}\"\n + f\" STANDALONE_CRT_DIR={tvm.micro.get_standalone_crt_dir()}\"\n + f\" FVP_DIR={fvp_dir}\"\n + custom_params\n )\n\n compile_log_path = os.path.join(build_path, \"test_compile.log\")\n compile_command = f\"{make_command} aot_test_runner\"\n if verbose:\n print(\"Compile command:\\n\", compile_command)\n _subprocess_check_log_output(compile_command, \".\", compile_log_path)\n\n # Verify that runs fine\n run_log_path = os.path.join(build_path, \"test_run.log\")\n run_command = f\"{make_command} run\"\n if verbose:\n print(\"Run command:\\n\", run_command)\n\n _subprocess_check_log_output(run_command, build_path, run_log_path)\n\n with open(run_log_path) as run_log:\n assert AOT_SUCCESS_TOKEN in run_log.read()\n\n return True\n\n if test_dir is None:\n tmpdir = utils.tempdir()\n return run_and_check_body(os.path.join(tmpdir.path, \"test\"))\n else:\n return run_and_check_body(test_dir)", "def testModel( self, classTest, classPred):", "def main():\n config = handle_args()\n print(config)\n\n if config['mode'] == 'train':\n logger.info('Train mode')\n trainer = Trainer(config, shared_theano_params=None, **config)\n trainer.train(**config)\n logger.warn('Exiting train mode')\n else:\n logger.info('Test mode')\n translator = Translator(**config)\n translator.load_from_disk(config['models'], config['configs'],\n 
config['src_dicts'], config['trg_dict'])\n translator.translate_and_save(**config)", "def runtests():\r\n\r\n app_abspath = os.path.dirname(os.path.dirname(__file__))\r\n models_abspath = os.path.join(app_abspath, 'models.py')\r\n models_exists = os.path.isfile(models_abspath)\r\n urls_abspath = os.path.join(app_abspath, 'urls.py')\r\n urls_exists = os.path.isfile(urls_abspath)\r\n views_abspath = os.path.join(app_abspath, 'views')\r\n views_exists = os.path.isdir(views_abspath)\r\n tpls_abspath = os.path.join(app_abspath, 'templates')\r\n tpls_exists = os.path.isdir(tpls_abspath)\r\n\r\n for f in [models_abspath, urls_abspath]:\r\n if os.path.isfile(f):\r\n subprocess.call('cp {} {}.orig'.format(f, f), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(views_abspath, views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(tpls_abspath, tpls_abspath), shell=True)\r\n\r\n overwrite_project_language('ja')\r\n subprocess.call('python manage.py generatescaffold test_app I18nModel title:string', shell=True)\r\n time.sleep(1)\r\n overwrite_project_language('en-us')\r\n time.sleep(1)\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedNoTimestampModel title:string description:text --no-timestamps', shell=True)\r\n time.sleep(2) # Give time for Django's AppCache to clear\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedModel title:string description:text', shell=True)\r\n\r\n test_status = subprocess.call('python manage.py test --with-selenium --with-selenium-fixtures --with-cherrypyliveserver --noinput', shell=True)\r\n\r\n if models_exists:\r\n subprocess.call('mv {}.orig {}'.format(models_abspath, models_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(models_abspath), shell=True)\r\n\r\n if urls_exists:\r\n subprocess.call('mv {}.orig {}'.format(urls_abspath, urls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(urls_abspath), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(views_abspath, views_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(tpls_abspath, tpls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n\r\n subprocess.call('rm {}/*.pyc'.format(app_abspath), shell=True)\r\n\r\n sys.exit(test_status)", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def cli(sys_argv: List[str]):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_definition', type=str,\n help='Path to json model definition')\n\n parser.add_argument('--model_state_path', 
type=str,\n help='Path where to the trained parameters')\n\n parser.add_argument('--data_path', type=str, default=TEST_PATH,\n help='path to the pickled dataframe on which prediction should be made')\n\n parser.add_argument('--numerical_preprocessor', type=str, default=NUMERICAL_PREPROCESSOR_SAVE_PATH,\n help='Path of the saved numerical preprocessor')\n\n parser.add_argument('--categorical_preprocessor', type=str, default=CATEGORICAL_PREPROCESSOR_SAVE_PATH,\n help='Path to the saved categorical preprocessor')\n\n parser.add_argument('--output_directory', type=str, default=RESULTS_DIR,\n help='Path where to save the prediction of the experiment')\n\n args = parser.parse_args(sys_argv)\n\n # # ---------- parse config file ---------- # #\n config: dict = json.load(open(args.model_definition, 'r'))\n\n model_class: str = config['model_class']\n model_name: str = config['model_name']\n numerical_input_features: List[str] = config['data']['numerical_input_features']\n categorical_input_features: List[str] = config['data']['categorical_input_features']\n output_features: List[str] = config['data']['output_features']\n batch_size_test: int = config['data']['batch_size_test']\n\n device = torch.device(CUDA if torch.cuda.is_available() else CPU)\n\n # # ---------- parse model state ---------- # #\n model_state = load_model_state(args.model_state_path, device)\n\n model_hyperparameters: dict = model_state['hyperparameters']\n model_hyperparameters.update(config['model'])\n model_hyperparameters['device']: torch.device = device\n model_weights: dict = model_state['best_model_state_dict']\n\n # # ---------- initialize model ---------- # #\n model = REGISTERED_MODELS[model_class](**model_hyperparameters).to(device)\n model.load(model_weights)\n\n # # ---------- preprocess data for inference ---------- # #\n test_loader = preprocess_for_inference(\n args.data_path,\n numerical_input_features,\n categorical_input_features,\n output_features,\n args.numerical_preprocessor,\n args.categorical_preprocessor,\n batch_size_test=batch_size_test\n )\n\n # # ---------- compute and save predictions ---------- # #\n predictions = model.predict(test_loader)\n\n # save predictions\n data_file_name = os.path.basename(args.data_path)\n data_file_name = os.path.splitext(data_file_name)[0] # remove extension\n model_path = '{}/predictions_{}_{}.pickle'.format(args.output_directory, model_name, data_file_name)\n print(' [predict] Saving predictions at: `{}`'.format(model_path))\n file_utils.save_to_pickle(\n predictions,\n path=model_path\n )\n print(' [predict] Done')", "def test():\n return _make_modules(is_train=False)", "def test_generate_all_testing(self):\n pass", "def runTests(self):\n \n pass", "def main():\n run_test_all()", "def runtest(self):", "def test_models(self) -> None:\n full_name = \"owenstranathan/kupy-test\"\n self.assertEqual(full_name, self.project.full_name)\n self.assertEqual(sha1(full_name), self.project.id)\n self.assertEqual('12345', self.project.secrets['SECRET_TOKEN'])\n self.assertIn(self.build, list(self.project.builds))\n self.assertIn(self.deploy, list(self.project.deploys))\n\n build_id = sha1(f\"{self.project.id}/{self.commit_id}\")\n self.assertEqual(self.build.id, build_id)\n self.assertEqual(self.build.branch, \"develop\")\n self.assertEqual(self.build.commit_id, self.commit_id)\n self.assertEqual(self.build.project, self.project)\n self.assertIn(self.deploy, list(self.build.deploys))\n\n deploy_id = sha1(f\"{self.project.id}/{self.build.id}\")\n self.assertEqual(deploy_id, 
self.deploy.id)\n self.assertEqual(self.project, self.deploy.project)\n self.assertEqual(self.build, self.deploy.build)", "def __call__(self,setup_options=True, instantiate_options=True, verbose=False):\n model = self.setup(setup_options)\n model(instantiate_options, verbose)\n return model", "def run_models(\n self,\n normal=True,\n interrupt=True,\n run_start=None,\n state_builder=\"acis\",\n hrc=False,\n ):\n if hrc:\n loads = hrc_loads\n else:\n loads = test_loads\n if normal and \"normal\" in loads:\n for load in loads[\"normal\"]:\n self.run_model(\n load_week=load,\n run_start=run_start,\n state_builder=state_builder,\n )\n if interrupt and \"interrupt\" in loads:\n for load in loads[\"interrupt\"]:\n self.run_model(\n load_week=load,\n interrupt=True,\n run_start=run_start,\n state_builder=state_builder,\n )", "def test_get_model(self) -> None:\n get_model()", "def main():\n \n opt, args = get_options()\n outdir = opt.outdir\n dbfilename = os.path.join(outdir, 'test.db3')\n import Ska.DBI\n dbh = Ska.DBI.DBI(dbi='sqlite', server=dbfilename)\n model = timelines_test.run_model( opt, dbh)\n\n #timelines_test.cmp_states( opt, dbfilename )\n #timelines_test.cmp_timelines( opt, dbfilename )", "def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' 
% (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_build_model(self):\n with tempfile.TemporaryDirectory() as tempdir:\n eval_file = os.path.join(tempdir, \"en_test.dev.txt\")\n with open(eval_file, \"w\", encoding=\"utf-8\") as fout:\n fout.write(fake_text_1)\n train_file = os.path.join(tempdir, \"en_test.train.txt\")\n with open(train_file, \"w\", encoding=\"utf-8\") as fout:\n for i in range(1000):\n fout.write(fake_text_1)\n fout.write(\"\\n\")\n fout.write(fake_text_2)\n fout.write(\"\\n\")\n save_name = 'en_test.forward.pt'\n vocab_save_name = 'en_text.vocab.pt'\n checkpoint_save_name = 'en_text.checkpoint.pt'\n args = ['--train_file', train_file,\n '--eval_file', eval_file,\n '--eval_steps', '0', # eval once per opoch\n '--epochs', '2',\n '--cutoff', '1',\n '--batch_size', '%d' % len(fake_text_1),\n '--lang', 'en',\n '--shorthand', 'en_test',\n '--save_dir', tempdir,\n '--save_name', save_name,\n '--vocab_save_name', vocab_save_name,\n '--checkpoint_save_name', checkpoint_save_name]\n args = charlm.parse_args(args)\n 
charlm.train(args)\n\n assert os.path.exists(os.path.join(tempdir, vocab_save_name))\n\n # test that saving & loading of the model worked\n assert os.path.exists(os.path.join(tempdir, save_name))\n model = char_model.CharacterLanguageModel.load(os.path.join(tempdir, save_name))\n\n # test that saving & loading of the checkpoint worked\n assert os.path.exists(os.path.join(tempdir, checkpoint_save_name))\n model = char_model.CharacterLanguageModel.load(os.path.join(tempdir, checkpoint_save_name))\n trainer = char_model.CharacterLanguageModelTrainer.load(args, os.path.join(tempdir, checkpoint_save_name))\n\n assert trainer.global_step > 0\n assert trainer.epoch == 2\n\n # quick test to verify this method works with a trained model\n charlm.get_current_lr(trainer, args)\n\n # test loading a vocab built by the training method...\n vocab = charlm.load_char_vocab(os.path.join(tempdir, vocab_save_name))\n trainer = char_model.CharacterLanguageModelTrainer.from_new_model(args, vocab)\n # ... and test the get_current_lr for an untrained model as well\n # this test is super \"eager\"\n assert charlm.get_current_lr(trainer, args) == args['lr0']", "def test_training(self):\n\t\tpass", "def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()", "def test_auto_gen_models(tmpdir_factory):\n # Create temp file\n fn = tmpdir_factory.mktemp(\"data\").join('models.py')\n expected_file = str(fn)\n\n # Autogen models\n auto_gen_models(config_name='testing', refresh_schema=True,\n model_filepath=expected_file)\n\n assert os.path.isfile(expected_file)\n\n # Check content\n with open(expected_file, 'r') as f:\n content = f.read()\n # assert 'ModelMixins' in content\n assert 'metadata' in content", "def tests():", "def setUp(self):\n self.my_model1 = BaseModel()\n self.my_model1.name = \"hello\"\n self.my_model1.number = 9\n self.my_model2 = BaseModel()\n self.my_model2.name = \"goodbye\"\n self.my_model2.number = 19\n self.mock_stdin = create_autospec(sys.stdin)\n self.mock_stdout = create_autospec(sys.stdout)", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n 
intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def test_compile_model(sequential_model, model_data, fitted_model):\n x_train, y_train, x_val, y_val, x_test, _ = model_data\n try:\n fitted_model\n except RuntimeError:\n pytest.fail(\"Unexpected RuntimeError: Model needs to be compiled before fitting.\")", "def main():\n flags = 
PARSER.parse_args()\n\n if flags.to == 'savedmodel':\n to_savedmodel(input_shape=flags.input_shape,\n model_fn=unet_fn,\n src_dir=flags.checkpoint_dir,\n dst_dir='./saved_model',\n input_names=['IteratorGetNext'],\n output_names=['total_loss_ref'],\n use_amp=flags.use_amp,\n use_xla=flags.use_xla,\n compress=flags.compress)\n if flags.to == 'tensorrt':\n ds = Dataset(data_dir=flags.data_dir,\n batch_size=1,\n augment=False,\n gpu_id=0,\n num_gpus=1,\n seed=42)\n iterator = ds.test_fn(count=1).make_one_shot_iterator()\n features = iterator.get_next()\n\n sess = tf.Session()\n\n def input_data():\n return {'input_tensor:0': sess.run(features)}\n\n to_tensorrt(src_dir=flags.savedmodel_dir,\n dst_dir='./tf_trt_model',\n precision=flags.precision,\n feed_dict_fn=input_data,\n num_runs=1,\n output_tensor_names=['Softmax:0'],\n compress=flags.compress)\n if flags.to == 'onnx':\n to_onnx(src_dir=flags.savedmodel_dir,\n dst_dir='./onnx_model',\n compress=flags.compress)", "def test_create_run(self):\n pass", "def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def run_task(data_dir, task_id):\n print(\"Train and test for task %d ...\" % task_id)\n\n print(\"We are going to use this\")\n \n\n # Parse data\n train_files = glob.glob('%s/qa3_*_train.txt' % (data_dir, task_id))\n test_files = glob.glob('%s/qa3_*_test.txt' % (data_dir, task_id))\n\n dictionary = {\"nil\": 0}\n train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)\n test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)\n\n general_config = BabiConfig(train_story, train_questions, dictionary)\n\n\n # #### R: this line build a empty model to train\n # memory, model, loss = build_model(general_config)\n\n # if general_config.linear_start:\n # train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n # else:\n # train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n\n\n\n # memory, model, loss = build_model(general_config)\n\n # this line\n test(test_story, test_questions, test_qstory, memory, model, loss, general_config)", "def local(ctx):\n _do_codegen_user(\"demo\")\n _do_codegen_user(\"errors\")\n _do_codegen_user(\"mpi\")\n _do_codegen_user(\"omp\")\n _do_codegen_user(\"python\")\n\n # Do codegen for libfake\n for so in LIB_FAKE_FILES:\n _do_codegen_file(so)\n\n # Run the WAMR codegen required by the tests\n codegen(ctx, \"demo\", \"echo\", wamr=True)\n codegen(ctx, \"demo\", \"chain\", wamr=True)\n\n # Run the SGX codegen required by the tests\n codegen(ctx, \"demo\", \"hello\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_a\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_b\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_c\", wamr=True, sgx=True)", "def test_models(directorio=''):\r\n \r\n print('The trained models will be tested now')\r\n start = time.time()\r\n \r\n busqueda = \"ls \" + directorio + \"/*.h5 > model_names.txt\"\r\n\r\n os.system(busqueda)\r\n\r\n X = np.load(directorio + '/Xtest.npy')\r\n diccio = np.load(directorio + '/feature_standarisation.npy').item()\r\n y = pd.read_csv(directorio + '/dbtest.csv')['target'].values\r\n\r\n X = (X - diccio['mean'])/diccio['std']\r\n x = np.reshape(X,(X.shape[0],X.shape[2]))\r\n \r\n with 
open('model_names.txt','r') as f:\r\n for line in f:\r\n modelo = models.load_model(line[:len(line)-1])\r\n nombre = line.split('/')[1]\r\n outpred = modelo.predict(x)\r\n prediction = outpred >= 0.5\r\n \r\n cost = -(np.dot(y,np.log10(outpred)) + \\\r\n np.dot((1-y),np.log10(1-outpred)))/y.shape[0]\r\n precision,recall,fscore,support = PRFS(y, prediction)\r\n \r\n with open(directorio + '/test_results.txt','a') as tr:\r\n tr.write(nombre + '\\n')\r\n tr.write('cost function: '+str(cost[0])+'\\n')\r\n tr.write('samples: '+str(support)+'\\n')\r\n tr.write('precision: '+str(np.round(precision*100,2))+'\\n')\r\n tr.write('recall: '+str(np.round(recall*100,2))+'\\n')\r\n tr.write('f1-score: '+str(np.round(fscore*100,2))+'\\n')\r\n tr.write('\\n')\r\n tr.close()\r\n \r\n print('The test of all trained models lasted ', round(time.time()-start,2),' s')\r\n os.system('rm model_names.txt')\r\n \r\n return", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def test_predictor():", "def test(ctx, input_file, model, output_file):\n # parse extra input args\n kwargs = {ctx.args[i][2:]: ctx.args[i+1].strip('\"') for i in range(0, len(ctx.args), 2)}\n if 'use_groups' in kwargs:\n if kwargs['use_groups']:\n no_groups = 0\n else:\n no_groups = 1\n else:\n no_groups = 1\n click.echo('Init model from: ' + model)\n model_class = MDCASClassifier.init(True, None, None)\n click.echo('Make prediction on: ' + input_file)\n pred_df = model_class.test(model_bundle_file = model, test_set_file=input_file, gt_set_file=None, input_format='joblib', verbose=True, prob=1, no_groups=no_groups)\n click.echo('Save predictions to: ' + output_file)\n model_class.export_test(output_file)\n click.echo('Saved')", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def run_models(request):\n job_form_data = request.session['job_form_data']\n job_wrapper = JobWrapper(job_form_data)\n job_wrapper.create_data_file()\n print job_wrapper.job_form_data\n # Must run emits to generate emis_co2.dat - this step is requried to\n # run the models and it's a lot simpler to have it run form here than\n # from a job manager script\n cmd = \"/var/opt/IMOGEN/EMITS/emits\"\n subprocess.call(cmd, shell=True)\n print \"Ran {0} program\".format(cmd)\n # Now submit the models via the job manager\n jr = DRMAAJobRunner()\n return jr.queue_job(job_wrapper)", "def test_py_compile_basic(self):\n self._test_py_compile('basic')", "def main():\n tng.api.runner()", "def main(args):\n\n print(now(), \"test_model.py main() running.\")\n\n test_log = \"clean_test_log.txt\"\n to_log_file(args, args.output, test_log)\n\n # Set device\n device = \"cuda\" if 
torch.cuda.is_available() else \"cpu\"\n\n ####################################################\n # Dataset\n if args.dataset.lower() == \"cifar10\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n elif args.dataset.lower() == \"cifar100\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n\n elif args.dataset.lower() == \"tinyimagenet_first\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"firsthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"firsthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_last\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"lasthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"lasthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_all\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"all\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"all\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n else:\n print(\"Dataset not yet implemented. 
Exiting from test_model.py.\")\n sys.exit()\n\n ####################################################\n\n ####################################################\n # Network and Optimizer\n net = get_model(args.model, args.dataset)\n\n # load model from path if a path is provided\n if args.model_path is not None:\n net = load_model_from_checkpoint(args.model, args.model_path, args.dataset)\n else:\n print(\"No model path provided, continuing test with untrained network.\")\n net = net.to(device)\n ####################################################\n\n ####################################################\n # Test Model\n training_acc = test(net, trainloader, device)\n natural_acc = test(net, testloader, device)\n print(now(), \" Training accuracy: \", training_acc)\n print(now(), \" Natural accuracy: \", natural_acc)\n stats = OrderedDict(\n [\n (\"model path\", args.model_path),\n (\"model\", args.model),\n (\"normalize\", args.normalize),\n (\"augment\", args.train_augment),\n (\"training_acc\", training_acc),\n (\"natural_acc\", natural_acc),\n ]\n )\n to_results_table(stats, args.output, \"clean_performance.csv\")\n ####################################################\n\n return", "def run_test(self):\n raise NotImplementedError", "def test_build(self):\n self.app.build()", "def run():\n print('*-----------------------------------*')\n print('Running main.py ...')\n model = MLPModel(CFG, name='tfds_tryout')\n print('* Model defined')\n model.load_data(method='tfds')\n print('* Data Loaded')\n print(model.datasetinfo)\n model.build()\n model.train()\n model.evaluate()\n model.save()", "def setup_models(self):\n pass", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def test_training():\n assert init_engine('train', [\"config=first_run_test/default.yaml\"]).run() is None", "def test_machine_learning():", "def main(_config, _run):\n sacred.commands.print_config(_run)\n dump_config_and_makefile()\n prepare_and_train()", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def run_tests(self):\n raise NotImplementedError", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def test_BuildModel0(self):\n print(\"\\nTest 4: Building a Basic Model\")\n builder = StaticBuilder(scope=\"Basic\")\n in_name = builder.addInput(10)\n enc_name = builder.addInner(3)\n out_name = builder.addOutput()\n builder.addDirectedLink(in_name, enc_name)\n builder.addDirectedLink(enc_name, out_name)\n \n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n \n builder.build()\n inn, enc, out = ( builder.nodes[in_name], builder.nodes[enc_name],\n builder.nodes[out_name] 
)\n self.assertEqual(inn._oslot_to_otensor[0].shape.as_list()[-1],\n enc._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")\n self.assertEqual(enc._oslot_to_otensor[0].shape.as_list()[-1],\n out._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")", "def test_deep_learning_models():\n atom = ATOMClassifier(*mnist, n_rows=0.1, random_state=1)\n pytest.raises(PermissionError, atom.clean)\n atom.run(KerasClassifier(neural_network, epochs=1, batch_size=512, verbose=0))", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' + args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def test_compile(self):\n # Get a network\n qnn = self.get_test_network(\"1q-qvm\")\n\n # Compile a data point\n executable = qnn.compile(index=0, shots=1000)\n\n # Checks\n self.assertEqual(type(executable), Program)", "def build_model():", "def test_main_integration(self, monkeypatch, mock_model, input_data):\n # mock out the classifier loading and predictions (in a valid way)\n def predict_fn(x):\n predictions = np.tile([True, None, False], math.ceil(len(x)/3))\n return predictions[:len(x)]\n\n monkeypatch.setattr(\"joblib.load\", lambda x: mock_model(predict_fn))\n # Note for future devs -- `args=[]` is VERY important to pass when\n # testing argschema modules using VSCode and probably other IDEs.\n # If not, will exit during parsing with no good error\n # (it's ingesting the command line args passed to the test runner)\n parser = InferenceParser(input_data=input_data, args=[])\n main(parser)\n # Check outputs exist (contents are mock result)\n assert os.path.exists(parser.args[\"output_json\"])", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n 
test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def main():\n testlib = VorpatestLibrary()\n testlib.prepare_test()\n testlib.run_vorpaline(*sys.argv[1:])\n testlib.run_vorpastat()\n testlib.cleanup_test()", "def cli(config, data, metrics, model, loader):\n runner = TestMlRunner(config=config, data=data, metrics=metrics, model=model, loader=loader)\n print(runner.run())", "def main():\n logger.info(\"=> creating model ...\")\n logger.info(\"Classes: %s\", cfg.classes)\n\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n std = [item * value_scale for item in std]\n gray_folder = os.path.join(cfg.result_path, 'gray')\n color_folder = os.path.join(cfg.result_path, 'color')\n\n test_transform = pt_transform.Compose([pt_transform.Normalize(mean=mean, std=std, is_train=False)])\n\n if cfg.data_root[-1] == \"/\":\n val_list = cfg.data_root + cfg.val_list\n else:\n val_list = cfg.data_root + '/' + cfg.val_list\n\n test_data = pt_dataset.SemData(\n split='val', data_root=cfg.data_root,\n data_list=val_list,\n transform=test_transform)\n\n test_loader = ds.GeneratorDataset(test_data, column_names=[\"data\", \"label\"],\n shuffle=False)\n test_loader.batch(1)\n colors = numpy.loadtxt(cfg.color_txt).astype('uint8')\n\n from src.model import cpnet\n\n CPNet = cpnet.CPNet(\n prior_channels=256,\n proir__size=60,\n am_kernel_size=11,\n pretrained=True,\n pretrained_path=cfg.pretrain_path,\n deep_base=True\n )\n\n ms_checkpoint = load_checkpoint(cfg.ckpt)\n load_param_into_net(CPNet, ms_checkpoint, strict_load=True)\n CPNet.set_train(False)\n test(test_loader, test_data.data_list, CPNet, cfg.classes, mean, std, cfg.base_size, cfg.test_h,\n cfg.test_w, cfg.scales, gray_folder, color_folder, colors)\n if cfg.split != 'test':\n cal_acc(test_data.data_list, gray_folder, cfg.classes)", "def main(_):\n if not FLAGS.model_output_dir:\n raise ValueError(\n \"Undefined model output directory. Perhaps you forgot to set the --model_output_dir flag?\")\n \n if FLAGS.predict_input_file:\n decode()\n else:\n train()", "def testgen(self):\n self.parse()\n self.generate()", "def test_setup_solvent_models():\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = get_template_script(tmp_dir)\n\n # Setup solvation system and reduce clearance to make test faster.\n template_script['systems']['hydration-system']['solvent1'] = 'PME'\n template_script['solvents']['PME']['clearance'] = '3.0 * angstrom'\n del template_script['experiments']\n\n # Test solvent models.\n for solvent_model in ['tip3p', 'tip4pew', 'tip3pfb', 'tip5p']:\n yaml_script = copy.deepcopy(template_script)\n yaml_script['solvents']['PME']['solvent_model'] = solvent_model\n if solvent_model == 'tip3p' or solvent_model == 'tip4pew':\n solvent_parameters = ['leaprc.water.' + solvent_model]\n else:\n solvent_parameters = ['leaprc.water.tip3p', 'frcmod.' 
+ solvent_model]\n yaml_script['solvents']['PME']['leap']['parameters'] = solvent_parameters\n yaml_script['options']['setup_dir'] = solvent_model\n exp_builder = ExperimentBuilder(yaml_script)\n\n # Infer number of expected atoms per water molecule from model.\n expected_water_n_atoms = int(list(filter(str.isdigit, solvent_model))[0])\n\n # Setup the system and check that water residues have expected number of particles.\n prmtop_filepath = exp_builder._db.get_system('hydration-system')[0].parameters_path\n topology = mdtraj.load_prmtop(prmtop_filepath)\n yield assert_equal, topology.residue(1).n_atoms, expected_water_n_atoms", "def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()", "def build_models():\n train_models()\n return build_response.sent_ok()", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def test_trainer(testsetting, w2vmodel, tweets, targets, labels, ids, tweets_test, targets_test, labels_test, ids_test, hidden_size, max_epochs, tanhOrSoftmax, dropout, modeltype=\"conditional\", targetInTweet={}, testid = \"test-1\", pretrain = \"pre_cont\", acc_thresh=0.9, sep = False):\n\n # parameters\n learning_rate = 0.0001\n batch_size = 70\n input_size = 100\n\n outfolder = \"_\".join([testid, modeltype, testsetting, \"hidden-\" + str(hidden_size), tanhOrSoftmax])\n\n # real data stance-semeval\n target_size = 3\n max_seq_length = len(tweets[0])\n if modeltype == \"conditional-reverse\":\n data = [np.asarray(targets), np.asarray(tweets), np.asarray(ids), np.asarray(labels)]\n else:\n data = [np.asarray(tweets), np.asarray(targets), np.asarray(ids), np.asarray(labels)]\n\n X = w2vmodel.syn0\n vocab_size = len(w2vmodel.vocab)\n\n if modeltype == \"concat\":\n model, placeholders = get_model_concat(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"tweetonly\":\n model, placeholders = get_model_tweetonly(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n data = [np.asarray(tweets), np.asarray(ids), np.asarray(labels)]\n elif modeltype == \"conditional\" or modeltype == \"conditional-reverse\":\n # output of get_model(): model, [inputs, inputs_cond]\n model, placeholders = get_model_conditional(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"bicond\":\n model, placeholders = get_model_bidirectional_conditioning(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"conditional-target-feed\":\n model, placeholders = get_model_conditional_target_feed(batch_size, max_seq_length, input_size, hidden_size,\n target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"bicond-sepembed\":\n model, placeholders = 
get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size,\n target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n sep = True\n\n ids = tf.placeholder(tf.float32, [batch_size, 1], \"ids\") #ids are so that the dev/test samples can be recovered later since we shuffle\n targets = tf.placeholder(tf.float32, [batch_size, target_size], \"targets\")\n\n\n loss = tf.nn.softmax_cross_entropy_with_logits(model, targets) # targets: labels (e.g. pos/neg/neutral)\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n batcher = BatchBucketSampler(data, batch_size)\n acc_batcher = BatchBucketSampler(data, batch_size)\n\n placeholders += [ids]\n placeholders += [targets]\n\n pad_nr = batch_size - (\n len(labels_test) % batch_size) + 1 # since train/test batches need to be the same size, add padding for test\n\n # prepare the testing data. Needs to be padded to fit the batch size.\n if modeltype == \"tweetonly\":\n data_test = [np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n elif modeltype == \"conditional-reverse\":\n data_test = [np.lib.pad(np.asarray(targets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n else:\n data_test = [np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(targets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n\n corpus_test_batch = BatchBucketSampler(data_test, batch_size)\n\n\n with tf.Session() as sess:\n summary_writer = tf.train.SummaryWriter(\"./out/save\", graph_def=sess.graph_def)\n\n hooks = [\n SpeedHook(summary_writer, iteration_interval=50, batch_size=batch_size),\n SaveModelHookDev(path=\"../out/save/\" + outfolder, at_every_epoch=1),\n SemEvalHook(corpus_test_batch, placeholders, 1),\n LossHook(summary_writer, iteration_interval=50),\n AccuracyHook(summary_writer, acc_batcher, placeholders, 2),\n AccuracyHookIgnoreNeutral(summary_writer, acc_batcher, placeholders, 2)\n ]\n\n trainer = Trainer(optimizer, max_epochs, hooks)\n epoch = trainer(batcher=batcher, acc_thresh=acc_thresh, pretrain=pretrain, embedd=X, placeholders=placeholders,\n loss=loss, model=model, sep=sep)\n\n print(\"Applying to test data, getting predictions for NONE/AGAINST/FAVOR\")\n\n predictions_detailed_all = []\n predictions_all = []\n ids_all = []\n\n load_model_dev(sess, \"../out/save/\" + outfolder + \"_ep\" + str(epoch), \"model.tf\")\n\n total = 0\n correct = 0\n for values in corpus_test_batch:\n total += len(values[-1])\n feed_dict = {}\n for i in range(0, len(placeholders)):\n feed_dict[placeholders[i]] = values[i]\n truth = np.argmax(values[-1], 1) # values[2] is a 3-length one-hot vector containing the labels\n if pretrain == \"pre\" and sep == True: # this is a bit hacky. 
To do: improve\n vars = tf.all_variables()\n emb_var = vars[0]\n emb_var2 = vars[1]\n sess.run(emb_var.assign(X))\n sess.run(emb_var2.assign(X))\n if pretrain == \"pre\": # this is a bit hacky. To do: improve\n vars = tf.all_variables()\n emb_var = vars[0]\n sess.run(emb_var.assign(X))\n predictions = sess.run(tf.nn.softmax(model), feed_dict=feed_dict)\n predictions_detailed_all.extend(predictions)\n ids_all.extend(values[-2])\n predicted = sess.run(tf.arg_max(tf.nn.softmax(model), 1),\n feed_dict=feed_dict)\n predictions_all.extend(predicted)\n correct += sum(truth == predicted)\n\n print(\"Num testing samples \" + str(total) +\n \"\\tAcc \" + str(float(correct)/total) +\n \"\\tCorrect \" + str(correct) + \"\\tTotal \" + str(total))\n\n\n # postprocessing\n if targetInTweet != {}:\n\n predictions_new = []\n ids_new = []\n it = 0\n for pred_prob in predictions_detailed_all:\n id = ids_all[it]\n if id == 0.0:\n it += 1\n continue\n inTwe = targetInTweet[id.tolist()[0]]\n if inTwe == True: #and (pred_prob[2] > 0.1 or pred_prob[1] > 0.1): #NONE/AGAINST/FAVOUR\n #print(str(id), \"inTwe!\")\n pred = 1\n if pred_prob[2] > pred_prob[1]:\n pred = 2\n predictions_new.append(pred)\n else:\n plist = pred_prob.tolist()\n pred = plist.index(max(plist))\n predictions_new.append(pred)\n it += 1\n ids_new.append(id)\n return predictions_new, predictions_detailed_all, ids_new\n\n return predictions_all, predictions_detailed_all, ids_all", "def test_multiple_base_models(engine):\n with pytest.raises(ValueError):\n bloop.model.BaseModel(engine)", "def unitary_test():", "def my_main() -> None: # pragma: no cover\n universe = AutomataUniverse(SQUARE_GRID_NEIGHBORS, [2,3], [3])\n instance = AutomataTransforms(universe)\n assert isinstance(instance, AutomataTransforms)\n # # _is_rot_mat_test(instance)\n # # _rotations_check(instance)\n # # _prime_cells_check(instance)\n # _check_transform_test(instance)\n # # _hashable_transform_test(instance)\n # _duplicate_test(instance)\n # _collision_test(instance)\n # _end_cycle_test(instance)\n # _add_transform_test(instance)\n # instance.generate_combination_transforms()\n\n # # _matrix_rotate_test(instance)\n # # _duplicate_test(instance) # test again after transform(s) added\n # # _collision_test(instance) # test again after transform(s) added «also refactoring»\n # instance.dbg_report_instance() # DEBUG", "def main():\n # Get datasets\n train_dataset, test_dataset = get_datasets()\n\n # Build neural network\n layers = [tf.keras.layers.Dense(22, activation='sigmoid'),\n tf.keras.layers.Dense(30, activation='sigmoid'),\n tf.keras.layers.Dense(1, activation='sigmoid')]\n\n model = tf.keras.models.Sequential(layers)\n model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['accuracy'])\n\n model.fit(train_dataset, epochs=10)\n\n # Test model\n model.evaluate(test_dataset, verbose=2)", "def setUp(self):\n self.base1 = BaseModel()", "def test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)", "def model_switch_to_testing(self):\n pass", "def setUp(self):\n self.vmodel_name = \"LABasin\"\n self.sim_id = int(seqnum.get_seq_num())\n self.install = InstallCfg()\n self.vmodel_obj = vmodels.get_velocity_model_by_name(self.vmodel_name)\n\n indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))\n tmpdir = 
os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))\n outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))\n logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))\n # Create all directories\n bband_utils.mkdirs([indir, tmpdir, outdir, logdir], print_cmd=False)\n\n # Copy needed files\n\n # src file\n r_src_file = \"nr_v12_11_0_fs.src\"\n src_file = os.path.join(self.install.A_TEST_REF_DIR, \"uwo\", r_src_file)\n self.src_file = os.path.join(indir, r_src_file)\n cmd = \"cp %s %s\" % (src_file, self.src_file)\n bband_utils.runprog(cmd)\n\n # exsim param template file\n vmodel_params = self.vmodel_obj.get_codebase_params('exsim')\n self.failIf('GENERIC_PARAM' not in vmodel_params)\n r_param_template = vmodel_params['GENERIC_PARAM']\n\n self.failIf(r_param_template == \"\" or r_param_template is None)\n param_template = os.path.join(self.vmodel_obj.base_dir,\n r_param_template)\n # r_param_template is relative to the velocity model basedir,\n # get only basename\n r_param_template = os.path.basename(r_param_template)\n self.param_template = os.path.join(indir, r_param_template)\n cmd = \"cp %s %s\" % (param_template, self.param_template)\n bband_utils.runprog(cmd)\n\n # station file\n r_stations = \"nr_v12_11_2.stl\"\n stations = os.path.join(self.install.A_TEST_REF_DIR, \"uwo\", r_stations)\n self.stations = os.path.join(indir, r_stations)\n cmd = \"cp %s %s\" % (stations, self.stations)\n bband_utils.runprog(cmd)", "def test_1_make(self):\n #We can compile all these modules together into a single shared library.\n writer = self.writers.values()[0]\n self.code = writer.make(remake=True, dependencies=self.dependencies)\n self.assertEqual(self.code, 0)", "def compile(self):\n self.model = compile_model(self.model_filename, include_paths = self.include_paths)\n self.simulation = compile_model(self.sim_filename, include_paths = self.include_paths)" ]
[ "0.7452091", "0.7298708", "0.7190168", "0.6873378", "0.6621304", "0.6594363", "0.65664387", "0.6546556", "0.651935", "0.65192693", "0.65000486", "0.6479922", "0.64395165", "0.6420277", "0.64168084", "0.64134866", "0.6403398", "0.6369678", "0.6283531", "0.62546074", "0.62540376", "0.6234562", "0.62240845", "0.61559314", "0.613173", "0.6131119", "0.61097693", "0.6106867", "0.60677224", "0.6061389", "0.6053786", "0.60297126", "0.60143566", "0.6002759", "0.5997726", "0.59905434", "0.5987972", "0.5983996", "0.5978899", "0.59786636", "0.59762144", "0.597512", "0.5971859", "0.59710896", "0.5958879", "0.595463", "0.59513766", "0.59464824", "0.5942646", "0.59389615", "0.5935943", "0.59329575", "0.5926761", "0.5918811", "0.5909379", "0.590614", "0.5900919", "0.58970386", "0.5891472", "0.5884933", "0.5882818", "0.58801734", "0.5878154", "0.5878089", "0.58724225", "0.5869584", "0.58688784", "0.58610594", "0.5860987", "0.5860987", "0.5858304", "0.5851542", "0.5848572", "0.58473885", "0.5840644", "0.5839819", "0.5837582", "0.5836673", "0.5831048", "0.5831048", "0.58306175", "0.5827939", "0.5813909", "0.5813337", "0.5809369", "0.5802946", "0.5802782", "0.5802761", "0.58023095", "0.579517", "0.57925725", "0.57908946", "0.57884413", "0.5788381", "0.57858807", "0.5784191", "0.5780888", "0.5775115", "0.57715625", "0.576802" ]
0.66533005
4
Produces the min,max for a given data type.
def get_dtype_range(dtype: str) -> Tuple[int, int]:

    type_info = None
    np_dtype = np.dtype(dtype)
    kind = np_dtype.kind
    if kind == "f":
        type_info = np.finfo(np_dtype)
    elif kind in ["i", "u"]:
        type_info = np.iinfo(np_dtype)
    else:
        raise TypeError(f"dtype ({dtype}) must indicate some floating-point or integral data type.")
    return type_info.min, type_info.max
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_max(self, data, era):\n return 0, np.max(data)", "def GetFieldMinMax(fielddef):\n minmax = {'c': (0, 0xff),\n '?': (0, 1),\n 'b': (~0x7f, 0x7f),\n 'B': (0, 0xff),\n 'h': (~0x7fff, 0x7fff),\n 'H': (0, 0xffff),\n 'i': (~0x7fffffff, 0x7fffffff),\n 'I': (0, 0xffffffff),\n 'l': (~0x7fffffff, 0x7fffffff),\n 'L': (0, 0xffffffff),\n 'q': (~0x7fffffffffffffff, 0x7fffffffffffffff),\n 'Q': (0, 0x7fffffffffffffff),\n 'f': (sys.float_info.min, sys.float_info.max),\n 'd': (sys.float_info.min, sys.float_info.max),\n }\n format_ = GetFieldDef(fielddef, fields='format_')\n min_ = 0\n max_ = 0\n\n if format_[-1:] in minmax:\n min_, max_ = minmax[format_[-1:]]\n max_ *= GetFormatCount(format_)\n elif format_[-1:].lower() in ['s','p']:\n # s and p may have a prefix as length\n max_ = GetFormatCount(format_)\n\n return min_,max_", "def minmax(data, fields):\n vmin = min(data[field].min() for field in fields)\n vmax = max(data[field].max() for field in fields)\n return dict(vmin=vmin, vmax=vmax)", "def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes", "def min_max(xs):\n return min(xs), max(xs)", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", "def get_min_max(ints):\r\n if len(ints) == 0:\r\n return None\r\n max = ints[0]\r\n min = ints[0]\r\n\r\n for int in ints:\r\n if int < min:\r\n min = int\r\n if int > max:\r\n max = int\r\n \r\n return min, max", "def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)", "def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans", "def min_max_date(self, min, max, date):\n\t\tif not min or min > date:\n\t\t\tmin = date\n\n\t\tif not max 
or max < date:\n\t\t\tmax = date\n\n\t\treturn min, max", "def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv", "def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)", "def minmax(xs):\n min_val = None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)", "def get_min_max(ints):\n if not ints:\n return\n max = ints[0]\n min = ints[0]\n\n\n for i in ints:\n if i > max:\n max = i\n if i < min:\n min = i\n return (min, max)", "def glGetMinmax( baseFunction, target, reset, format, type, values=None):\r\n if values is None:\r\n width = 2\r\n values = images.images.SetupPixelRead( format, (width,4), type )\r\n arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[\r\n images.images.TYPE_TO_ARRAYTYPE.get(type,type)\r\n ]\r\n baseFunction(\r\n target, reset, format, type,\r\n ctypes.c_void_p( arrayType.dataPointer(values))\r\n )\r\n return values", "def get_min_max(ints):\n if ints is None or len(ints) == 0:\n return None\n \n min_value = ints[0]\n max_value = ints[0]\n\n for value in ints:\n if value < min_value:\n min_value = value\n\n if value > max_value:\n max_value = value\n\n return (min_value, max_value)", "def get_min_max(ints):\n if not ints:\n return None, None\n if len(ints) ==None:\n return None\n min_val = float(\"inf\")\n max_val = -float(\"inf\")\n # for each int in ints if update max_val and min_val accordingly\n for integer in ints:\n if integer > max_val:\n max_val = integer\n\n if integer < min_val:\n min_val = integer\n \n return (min_val, max_val)", "def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])", "def min_max(my_list):\n print(\"Min = \",min(my_list,key = abs))\n print(\"Max = \",max(my_list,key = abs))", "def get_min_max(ints):\n if len(ints) <= 0:\n return ()\n min_value = ints[0]\n max_value = ints[0]\n for i in range(len(ints)):\n temp = ints[i]\n if temp <= min_value:\n min_value = temp\n if temp >= max_value:\n max_value = temp\n output = (min_value, max_value)\n# print(\"output: \", output)\n return output\n pass", "def min_max_outliers(res, min=None, max=None):\n min_max_list = []\n if isinstance(min, (int, float)):\n data1 = res[res < min].reset_index()\n data1['limit type'] = 'minimum'\n data1['limit'] = min\n min_max_list.append(data1)\n if isinstance(max, (int, float)):\n data1 = res[res > max].reset_index()\n data1['limit type'] = 'maximum'\n data1['limit'] = max\n min_max_list.append(data1)\n\n min_max1 = pd.concat(min_max_list)\n\n return min_max1", "def min_max(items):\n return min(items), max(items)", "def min_max(input):\r\n return tuple(sorted(input)[:1]+sorted(input)[-1:]) # write a line of code to return containing min and max\r\n #tuple(input[:1]+input[-1:]) --------------- it works for the sorted lists\r\n #tuple(sorted(input)[:1]+sorted(input)[-1:]) ---------------it works for any input---slicing lists and concatinating\r\n #tuple(sorted(input))[:1]+tuple(sorted(input))[-1:]----------------it works 
same as the above----slicing tuples and concatinating them\r", "def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]", "def get_minmax_stats(dataframe, variable):\n\n print(\"Maximum value of \", variable, \"is: \", dataframe[variable].max())\n print(\"Minimum value of \", variable, \"is: \", dataframe[variable].min())", "def get_min_max(ints):\n\n if len(ints) == 0:\n return (None,None)\n min = ints[0]\n max = ints[0]\n for x in range(1,len(ints)):\n if ints[x] > max:\n max=ints[x]\n elif ints[x] < min:\n min = ints[x]\n return (min,max)", "def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax", "def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum", "def get_min_max(ints):\n if ints == None or len(ints) == 0:\n return None\n if len(ints) == 1:\n return (ints[0],ints[0])\n \n max = ints[0]\n min = ints[0]\n for i in range(1, len(ints)):\n if ints[i] > max:\n max = ints[i]\n if ints[i] < min:\n min = ints[i]\n return (min,max)", "def _assign_min_max(self, val):\n if val in self.xml_json_num_map:\n return self.xml_json_num_map[val]\n elif val[-2:] == '.0':\n # or int(float(val))\n # or printf-style\n return val[:-2]\n else:\n return val", "def get_min_max(ints):\n if len(ints) == 0:\n return None, None\n \n min_e = ints[0]\n max_e = ints[-1]\n for e in ints:\n if isinstance(e, int) == False: # if the list includes non-integer number, do not find min, max \n return None,None\n if e < min_e:\n min_e = e\n if e > max_e:\n max_e = e\n return min_e, max_e", "def min_max(tr):\n tr = tr.astype(float)\n mm = tr.min() / tr.max()\n return mm", "def min_max(arr: StaticArray) -> ():\n minimum = arr.get(0) # sets min to first element\n maximum = arr.get(0) # sets max to first element\n # iterate over the elements in the array to check for < or >\n for index in range(arr.size()):\n if arr[index] < minimum: # if element is less than the current min, min = new element\n minimum = arr[index]\n elif arr[index] > maximum: # if element is greater than the current max, max = new element\n maximum = arr[index]\n return minimum, maximum", "def redshift_range_type(s):\n try:\n return tuple(map(float, s.split(',')))\n except:\n raise TypeError(\"redshift range must be zmin,zmax\")", "def get_min_max(nums):\n assert(type(nums) == list), \"nums has to be a list\"\n assert(len(nums) > 0), \"get_min_max() arg is an empty sequence\"\n min_ = nums[0]\n max_ = nums[0]\n for n in nums:\n assert(type(n) == int), \"numbers in the list have to be an integer\"\n if n < min_:\n min_ = n\n if n > max_:\n max_ = n\n return (min_, max_)", "def get_dyn_range(scale, zero_point, dtype):\n if dtype == torch.quint8:\n min_val, max_val = 0, 255\n elif dtype == torch.qint8:\n min_val, max_val = -128, 127\n else:\n raise RuntimeError(f\"Unsupported quantized dtype {dtype}\")\n\n return (min_val - zero_point) * scale, (max_val - zero_point) * scale", "def get_min_max_x(self, min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n 
print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x", "def column_max_min(data, column, sep_type=\"\", skip_rows=0):\n\n if sep_type==\"\":\n info=pd.read_csv(data, skiprows=skip_rows, header=None, delim_whitespace=True) \n else:\n info=pd.read_csv(data, skiprows=skip_rows, header=None, sep=sep_type)\n\n print(\"Min: %f\" %(info[column].min()))\n print(\"Max: %f\" %(info[column].max()))", "def smallest_int_type_for_range(minimum, maximum):\n signed = minimum < 0\n abs_max = max(maximum, abs(minimum))\n if signed:\n if abs_max < 1 << 7:\n return numpy.int8\n elif abs_max < 1 << 15:\n return numpy.int16\n elif abs_max < 1 << 31:\n return numpy.int32\n else:\n if abs_max < 1 << 8:\n return numpy.uint8\n elif abs_max < 1 << 16:\n return numpy.uint16\n elif abs_max < 1 << 32:\n return numpy.uint32\n # Return default integer type (other than in the exceptional case that the\n # value is too big to store in a signed 64-bit int)\n if not signed and abs_max > 1 << 63:\n return numpy.uint64\n else:\n return numpy.int64", "def _set_min_max_values(self):\n\n p_1, p_2 = self.points[0], self.points[1]\n nb_dim = len(p_1.values)\n self._min_values = []\n self._max_values = []\n for d in range(nb_dim):\n d_min = min(p_1[d], p_2[d])\n d_max = max(p_2[d], p_2[d])\n self._min_values.append(d_min)\n self._max_values.append(d_max)", "def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)", "def getValidRatingInputs(self):\n min = self.minRatingInput.get()\n max = self.maxRatingInput.get()\n\n try:\n min = int(min)\n except ValueError:\n min = 0\n\n try:\n max = int(max)\n except ValueError:\n max = 100\n\n return min, max", "def get_etype_2_minmax_funcEnum(entitytype_arr):\n etype_2_minmax_funcEnum = {}\n s = pd.Series(entitytype_arr)\n for name, group in s.groupby(s):\n etype_2_minmax_funcEnum[name] = (min(group.index), max(group.index))\n return etype_2_minmax_funcEnum", "def findmaxmin(input_file):\n\tE_list = sub.check_output(\"check_maxmin.py {}\".format(input_file), shell=True).decode(\"utf-8\")\n\tEmax = float(re.search(r\"Maximum\\s*:\\s*(([+-]|\\s)\\d*\\.\\d+)\", E_list).group(1))\n\tEmin = float(re.search(r\"Minimum\\s*:\\s*(([+-]|\\s)\\d*\\.\\d+)\", E_list).group(1))\n\treturn Emax, Emin", "def parse_vmin_vmax(container, field, vmin, vmax):\n field_dict = container.fields[field]\n field_default_vmin, field_default_vmax = get_field_limits(field)\n if vmin is None:\n if \"valid_min\" in field_dict:\n vmin = field_dict[\"valid_min\"]\n else:\n vmin = field_default_vmin\n if vmax is None:\n if \"valid_max\" in field_dict:\n vmax = field_dict[\"valid_max\"]\n else:\n vmax = field_default_vmax\n return vmin, vmax", "def minMaxFonc(liste):\n\n return min(liste), max(liste)", "def _get_extremes(self, attr='values'):\n # calculate the maximum and minimum for all series\n series_max = [0]\n series_min = [0]\n for s in self:\n if s is not None:\n series_max.append(s.max(attr))\n series_min.append(s.min(attr))\n return min(series_min), max(series_max)", "def 
get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if self.trees[name][\"point_helper\"] is None:\r\n mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum", "def get_min_max(self, groups, key):\n group = groups.get_group(key)\n min = group.loc[group[\"dif\"].idxmin()]\n max = group.loc[group[\"dif\"].idxmax()]\n minmax = {\"min\": min, \"max\": max}\n return minmax", "def min_max(lst):\r\n my_min = None\r\n my_max = None\r\n for num in lst:\r\n if (my_min and my_max) is not None:\r\n # recalculate running min and max:\r\n if num < my_min:\r\n my_min = num\r\n continue\r\n if num > my_max:\r\n my_max = num\r\n else:\r\n my_min = num\r\n my_max = num\r\n ans = (my_min, my_max)\r\n return ans", "def find_minmax(lims, olims):\n\n limzip = zip(list(lims), list(olims), [np.min, np.max])\n return tuple([float(fn([l, ol])) for l, ol, fn in limzip])", "def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)", "def form_MinMaxRange(request):\n schema = schemaish.Structure()\n schema.add('minmax', schemaish.Float(validator=validatish.Range(min=4,max=8)))\n\n form = formish.Form(schema, 'form')\n return form", "def my_min(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[0]\n\n if not args:\n raise ValueError(\"Can't find min, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find min, wrong data format\")\n return sorter(args)", "def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), 
np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax", "def vmnmx ( self , var , vmin , vmax ) :\n if var.xminmax() :\n vmn , vmx = var.xminmax ()\n if is_good_number ( vmin ) : vmin = max ( vmin , vmn )\n else : vmin = vmn\n if is_good_number ( vmax ) : vmax = min ( vmax , vmx )\n else : vmax = vmx\n\n assert is_good_number ( vmin ), 'Invalid type of ``min'' %s/%s' % ( vmin , type ( vmin ) )\n assert is_good_number ( vmax ), 'Invalid type of ``max'' %s/%s' % ( vmin , type ( vmin ) )\n assert vmin < vmax, 'Invalid min/max range: %s/%s' % ( vmin , vmax )\n \n return vmin , vmax", "def set_physical_minmax(self, min, max):\n # This allows you to set the min and the max of the quantity that you want the MLP to measure. \n # Once you set this, you can pass in a physical number to get_mlp_value() and it will be mapped to an MLP value and returned\n pass\n \n # Maybe we should let the holder of the MLP determine these values and do the mapping? ", "def minMaxBoucle(liste):\n minimum = float(\"inf\")\n maximum = -float(\"inf\")\n\n for index in range(0, 5):\n liste[index] = int(liste[index])\n if liste[index] > maximum:\n maximum = liste[index]\n if liste[index] < minimum:\n minimum = liste[index]\n return minimum, maximum", "def get_min_max(ints):\n print(f\"input arr is {ints}\")\n max=0\n min=len(ints)-1\n for i in range(1,len(ints)):\n if ints[i]>ints[max]:\n temp=ints[i]\n ints[i]=ints[max]\n ints[max]=temp\n if ints[i]<ints[min]:\n temp=ints[i]\n ints[i]=ints[min]\n ints[min]=temp\n #print(f\"max value is {ints[max]}\")\n #print(f\"min value is {ints[min]}\")\n return(ints[min],ints[max])", "def find_min_max(x):\n if not x: return None\n if len(x) == 1:\n return x[0], x[0] # the first is min and the second is max\n min_val = x[0]\n max_val = x[0]\n for i in xrange(1, len(x)):\n if x[i] < min_val:\n min_val = x[i]\n elif x[i] > max_val:\n max_val = x[i]", "def get_max_min_time(series: object, aggregation: str) -> dict:\n\n dates = pd.to_datetime(series)\n\n if aggregation == 'year':\n\n return {'max': dates.max().year,\n 'min': dates.min().year}\n\n elif aggregation is None:\n\n return {'max': dates.max(),\n 'min': dates.min()}", "def get_features_min_max(self):\n min_max_list = []\n\n # Get each feature's min and max values.\n for feature_name in self.feature_names:\n min = self.data[feature_name].min()\n max = self.data[feature_name].max()\n min_max_list.append([min, max])\n\n # Create dataframe from list of lists in correct format\n min_max_df = pd.DataFrame(min_max_list)\n min_max = min_max_df.T\n min_max.columns = self.feature_names\n min_max.index = ['min', 'max']\n\n return min_max", "def find_max_min(number):\n if max(number) == min(number):\n return [len(number)]\n return [min(number), max(number)]", "def extreme_values(self, extreme):\n\n\t\tif extreme.lower() == 'min':\n\t\t\treturn data.min()\n\t\telif extreme.lower() == 'max':\n\t\t\treturn data.max()\n\t\telse:\n\t\t\tassert 'Invalid Parameter !'", "def find_min_max(x, y, xmin, xmax, ymin, ymax, zoomout=0.05):\n if len(x) != 0:\n newxmin, newxmax = np.min(x), np.max(x)\n diffx = newxmax - newxmin\n if newxmin < xmin:\n xmin = newxmin - zoomout * diffx\n if newxmax > xmax:\n xmax = newxmax + zoomout * diffx\n\n if len(y) != 0:\n newymin, newymax = np.min(y), np.max(y)\n diffy = newymax - newymin\n if newymin < ymin:\n ymin = newymin - zoomout * diffy\n if newymax > ymax:\n ymax = newymax + zoomout * diffy\n return xmin, xmax, ymin, ymax", "def minmax ( self 
, nshoots = 100000 ) :\n ## try to get minmax directly from pdf/function \n if self.tricks and hasattr ( self.pdf , 'function' ) :\n if hasattr ( self.pdf , 'setPars' ) : self.pdf.setPars() \n f = self.pdf.function()\n if hasattr ( f , 'minmax' ) :\n try :\n mn , mx = f.minmax()\n if 0<= mn and mn <= mx and 0 < mx : \n return mn , mx\n except :\n pass\n if hasattr ( f , 'max' ) :\n try :\n mx = f.max()\n if 0 < mx : return 0 , mx\n except :\n pass\n\n ## check RooAbsReal functionality\n code = self.pdf.getMaxVal( ROOT.RooArgSet ( self.xvar , self.yvar ) )\n if 0 < code :\n mx = self.pdf.maxVal ( code )\n if 0 < mx : return 0 , mx\n \n ## not try to use random\n \n mn , mx = -1 , -10\n if hasattr ( self.pdf , 'min' ) : mn = self.pdf.min()\n if hasattr ( self.pdf , 'max' ) : mx = self.pdf.max()\n if 0 <= mn and mn <= mx and 0 < mx : return mn , mx\n \n if not self.xminmax() : return ()\n if not self.yminmax() : return ()\n \n mn , mx = -1 , -10\n xmn , xmx = self.xminmax()\n ymn , ymx = self.yminmax()\n for i in range ( nshoots ) : \n xx = random.uniform ( xmn , xmx )\n yy = random.uniform ( ymn , ymx )\n with SETVAR ( self.xvar ) :\n with SETVAR ( self.yvar ) :\n self.xvar.setVal ( xx )\n self.yvar.setVal ( yy )\n vv = self.pdf.getVal()\n if mn < 0 or vv < mn : mn = vv\n if mx < 0 or vv > mx : mx = vv\n \n return mn , mx", "def num_spec(\n tag: Tag = \"num\",\n type_: Union[Type, Tuple[Type, ...]] = (float, int),\n min_: Union[complex, float, int, None] = None,\n max_: Union[complex, float, int, None] = None,\n conformer: Optional[Conformer] = None,\n) -> Spec:\n\n @pred_to_validator(f\"Value '{{value}}' is not type {type_}\", complement=True)\n def is_numeric_type(x: Any) -> bool:\n return isinstance(x, type_)\n\n validators = [is_numeric_type]\n\n if min_ is not None:\n\n @pred_to_validator(f\"Number '{{value}}' is smaller than minimum {min_}\")\n def num_meets_min(x: Union[complex, float, int]) -> bool:\n return x < min_ # type: ignore\n\n validators.append(num_meets_min)\n\n if max_ is not None:\n\n @pred_to_validator(f\"String '{{value}}' exceeds maximum length {max_}\")\n def num_under_max(x: Union[complex, float, int]) -> bool:\n return x > max_ # type: ignore\n\n validators.append(num_under_max)\n\n if min_ is not None and max_ is not None:\n if min_ > max_: # type: ignore\n raise ValueError(\"Cannot define a spec with min greater than max\")\n\n return ValidatorSpec.from_validators(tag, *validators, conformer=conformer)", "def clamp(self, value, minVal, maxVal):\n if type(value) is type(\"string\"):\n return value\n if minVal != None and max != None:\n return max(min(value, maxVal), minVal)\n if minVal != None and maxVal == None:\n return max(value, minVal)\n if minVal == None and maxVal != None:\n return min(value, maxVal)\n return value", "def new_ratio_max_min(metric_id_to_list_of_values: Dict[iter8id, Iterable[float]]):\n max_min_lists = {\n metric_id: [None, None] for metric_id in metric_id_to_list_of_values\n }\n logger.debug(\"mert metricid\")\n logger.debug(metric_id_to_list_of_values)\n for metric_id in metric_id_to_list_of_values:\n try:\n max_min_lists[metric_id][0], max_min_lists[metric_id][1] = min(metric_id_to_list_of_values[metric_id]), max(metric_id_to_list_of_values[metric_id])\n except:\n logger.debug(\"Empty list of values found for metric %s\", metric_id)\n \n max_min_lists[metric_id] = RatioMaxMin(\n minimum = max_min_lists[metric_id][0],\n maximum = max_min_lists[metric_id][1]\n )\n \"\"\"if the list of values is empty for a metric id, return None values for 
max and min\n \"\"\"\n \n return max_min_lists", "def limits(array, names):\n\n args = ['%s(%s)' % (f, n)\n for n in names\n for f in ['min', 'max']]\n result = array.afl.aggregate(array, *args).toarray()\n return dict((n, (int(result['%s_min' % n][0]), int(result['%s_max' % n][0])))\n for n in names)", "def getColMinMax(table, col):\n\tvmin = None\n\tvmax = None\n\tfor rec in table:\n\t\tvalue = rec[col]\n\t\tif vmin is None:\n\t\t\tvmin = value\n\t\t\tvmax = value\n\t\telse:\n\t\t\tif value < vmin:\n\t\t\t\tvmin = value\n\t\t\telif value > vmax:\n\t\t\t\tvmax = value\n\treturn (vmin, vmax, vmax - vmin)", "def number_range(low, high, obj_type=int):\n\n def number_range_parser(argument):\n try:\n argument = obj_type(argument)\n except ValueError:\n argparse.ArgumentError('Must be of type {}'.format(obj_type.__name__))\n\n if low <= argument <= high:\n return argument\n else:\n parser.error('Value is not in the range of {} and {}'.format(low, high))\n\n return number_range_parser", "def __init__(self, values=None, min_value=None, max_value=None):\n if values is not None:\n # If list if provided, use it to determine min and max values\n self.min = min(values)\n self.max = max(values)\n\n else:\n self.min = min_value\n self.max = max_value", "def data_range(x):\n return max(x)-min(x)", "def GetMax(val, maximum):\n\tval = float(val)\n\tmaximum = float(maximum)\n\treturn max([val, maximum])", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def _limit(value, min_value, max_value):\n\n if value < min_value:\n return min_value\n if value > max_value:\n return max_value\n return value", "def calculate_avg_min_max(temps):\n\n temp_average = sum(temps) / len(temps)\n return temp_average, min(temps), max(temps)", "def _check_tmin_tmax(inst, tmin, tmax):\n _check_type(tmin, (None, \"numeric\"), item_name=\"tmin\")\n _check_type(tmax, (None, \"numeric\"), item_name=\"tmax\")\n\n # check positiveness for tmin, tmax\n for name, arg in ((\"tmin\", tmin), (\"tmax\", tmax)):\n if arg is None:\n continue\n if arg < 0:\n raise ValueError(\n f\"Argument '{name}' must be positive. \" f\"Provided '{arg}'.\"\n )\n # check tmax is shorter than instance\n if tmax is not None and inst.times[-1] < tmax:\n raise ValueError(\n \"Argument 'tmax' must be shorter than the instance \"\n f\"length. Provided: '{tmax}', larger than \"\n f\"{inst.times[-1]}s instance.\"\n )\n # check that tmax is larger than tmin\n if tmax is not None and tmin is not None and tmax <= tmin:\n raise ValueError(\n \"Argument 'tmax' must be strictly larger than 'tmin'. \"\n f\"Provided 'tmin' -> '{tmin}' and 'tmax' -> '{tmax}'.\"\n )\n # check that tmin is shorter than instance\n if tmin is not None and inst.times[-1] <= tmin:\n raise ValueError(\n \"Argument 'tmin' must be shorter than the instance \"\n f\"length. 
Provided: '{tmin}', larger than \"\n f\"{inst.times[-1]}s instance.\"\n )\n return tmin, tmax", "def find_minmax_criteria(self, data):\r\n found = {}\r\n data = dict(data)\r\n for k in data.keys():\r\n m = re.match(r'(?P<minmax>min|max)\\[(?P<property_id>\\d+)\\]', k)\r\n if m is not None:\r\n minmax = m.group('minmax')\r\n property_id = int(m.group('property_id'))\r\n if not found.has_key(property_id):\r\n found[property_id] = MinMaxCriteria(property_id)\r\n if minmax == 'min':\r\n found[property_id].min_value = self.__to_value(data[k])\r\n elif minmax == 'max':\r\n found[property_id].max_value = self.__to_value(data[k])\r\n self.minmax_criteria = found", "def find_max_min(self, col):\n self.max = max(col)\n self.min = min(col)", "def transform(data, dmin, dmax, dformat):\n\n if dformat == 'UV8':\n dform = 255\n else:\n dform = 65535\n # or even better: use numpy arrays, which removes need of for loops\n t = dmin + data * (dmax - dmin) / dform\n return t", "def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)", "def __repr__(self) -> str:\n if self.minimum:\n return 'minmax({}, {})'.format(self.minimum, self.maximum)\n return self.maximum", "def max(self, numeric_only=None):\n assert numeric_only == None\n return self._lift(\"min\")", "def calculate_min_max_tiles(self):", "def limits(f):\n from numpy import array, bool, uint8, uint16, int32\n code = f.dtype\n if code == bool: y=array([0,1])\n elif code == uint8: y=array([0,255])\n elif code == uint16: y=array([0,65535])\n elif code == int32: y=array([-2147483647,2147483647])\n else:\n assert 0,'Does not accept this typecode: %s' % code\n return y", "def minmax(value):\n return min(0xff, max(0, value))", "def min_max(arr, arr_size):\n max_t = arr[0]\n min_t = arr[0]\n for i in range(arr_size):\n if arr[i] > max_t:\n max_t = arr[i]\n if arr[i] < min_t:\n min_t = arr[i]\n return min_t, max_t", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)", "def get_tmin_tmax(self, models: List[Model] = None) -> DataFrame:\n if models is None:\n models = self.models\n\n tmintmax = DataFrame(columns=[\"tmin\", \"tmax\"], dtype=\"datetime64[ns]\")\n for ml in models:\n tmintmax.loc[ml.name, [\"tmin\", \"tmax\"]] = [\n ml.get_tmin(),\n ml.get_tmax(),\n ]\n\n return tmintmax", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def scalar_range2tuple(sr: ScalarRange, defaults=(-np.inf, np.inf)):\n return (\n sr.min.value if sr.HasField(\"min\") else defaults[0],\n sr.max.value if sr.HasField(\"max\") else defaults[1],\n )", "def maxmin(x):\n point_list = x[0]\n lons = [f for [f,s] in point_list]\n lats = [s for [f,s] in point_list]\n return [max(lats), min(lats), max(lons), min(lons)]", "def action_store_min(raw_val):\n\n if isinstance(raw_val, list):\n values = []\n for val in raw_val:\n val = auto_type_convert(val)\n if isinstance(val, (int, float)):\n values.append(val)\n if len(values) != 0:\n return min(values)\n else:\n return None\n else:\n return None", "def native_type_range(fmt):\n if fmt == 'c':\n lh = 0, 256\n elif fmt == '?':\n lh = 0, 2\n elif fmt == 'f':\n lh = -(1 << 63), 1 << 63\n elif fmt == 'd':\n lh = -(1 << 1023), 1 << 1023\n else:\n for exp in (128, 127, 64, 63, 32, 31, 16, 15, 8, 7):\n try:\n struct.pack(fmt, (1 << exp) - 1)\n break\n except struct.error:\n pass\n lh = (-(1 << exp), 1 << exp) if exp & 1 else (0, 1 << exp)\n return lh", "def min_max_scale(self, feature_range=(0, 1)):\n\n if 
len(feature_range) != 2:\n raise AttributeError(\n \"Error: `feature_range` doesn't look like a 2-element tuple?\"\n )\n if feature_range[1] < feature_range[0]:\n raise AttributeError(\"Error: `feature_range` isn't increasing?\")\n\n # use self.__class__ so that child classes can inherit this method\n return self.__class__(\n min_max_scale(self.spectrogram, feature_range=feature_range),\n frequencies=self.frequencies,\n times=self.times,\n decibel_limits=self.decibel_limits,\n )", "def to_range(images, min_value=0.0, max_value=1.0, dtype=None):\n assert \\\n np.min(images) >= -1.0 - 1e-5 and np.max(images) <= 1.0 + 1e-5 \\\n and (images.dtype == np.float32 or images.dtype == np.float64), \\\n 'The input images should be float64(32) and in the range of [-1.0, 1.0]!'\n if dtype is None:\n dtype = images.dtype\n return ((images + 1.) / 2. * (max_value - min_value) + min_value).astype(dtype)", "def _xywh2min_max(box):\n x, y, w, h = box\n return np.array([x, y, x+w, y+h])", "def minmax(self):\r\n vx = [v[0] for v in self.vl]\r\n vy = [v[1] for v in self.vl]\r\n self.xmax, self.xmin = max(vx), min(vx)\r\n self.ymax, self.ymin = max(vy), min(vy)" ]
[ "0.6595436", "0.6573459", "0.63240343", "0.6323688", "0.6292171", "0.62297505", "0.61910516", "0.6189594", "0.6187843", "0.61333334", "0.6130908", "0.61257744", "0.6082531", "0.60706013", "0.60532266", "0.6048332", "0.60448104", "0.60303867", "0.6009819", "0.60043925", "0.5995886", "0.59930295", "0.598443", "0.5968948", "0.5952344", "0.59388274", "0.59077793", "0.5891386", "0.5888934", "0.5883468", "0.5860202", "0.5846941", "0.5846552", "0.5830873", "0.58181113", "0.58175474", "0.5780579", "0.5771377", "0.5757131", "0.5748483", "0.5743375", "0.5721286", "0.57173425", "0.5714666", "0.5709269", "0.56999105", "0.5694176", "0.5672226", "0.56685704", "0.5648692", "0.56483036", "0.5640607", "0.56334776", "0.5626606", "0.5621601", "0.56208885", "0.55986524", "0.5598367", "0.55869746", "0.5576467", "0.55751115", "0.55387914", "0.5528161", "0.55036855", "0.54901445", "0.547111", "0.54488975", "0.5447621", "0.5435881", "0.54352206", "0.5420675", "0.5416127", "0.54150444", "0.5414555", "0.54104763", "0.5404203", "0.53969353", "0.53947926", "0.53902113", "0.537994", "0.536619", "0.535557", "0.5352547", "0.53412414", "0.53321683", "0.5331529", "0.53293324", "0.5327545", "0.5319609", "0.5308358", "0.5307698", "0.53063333", "0.53029567", "0.5299197", "0.52976876", "0.5292747", "0.5291315", "0.5283397", "0.52808356", "0.52756476" ]
0.5915377
26
Generate reference data through executing the relay module
def generate_ref_data(mod, input_data, params=None, target="llvm"):
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lib = relay.build(mod, target=target, params=params)

    lib_name = "mod.so"
    temp = utils.tempdir()
    lib_path = temp.relpath(lib_name)
    lib.export_library(lib_path)
    lib = tvm.runtime.load_module(lib_path)
    grt_mod = graph_executor.GraphModule(lib["default"](tvm.cpu()))
    grt_mod.set_input(**input_data)
    grt_mod.run()
    output_count = grt_mod.get_num_outputs()
    out = [grt_mod.get_output(i).numpy() for i in range(output_count)]

    if isinstance(mod, tvm.relay.Function):
        main = mod
    else:
        main = mod["main"]

    if main.attrs is None or main.attrs["output_tensor_names"] is None:
        output_tensor_names = (
            ["output"] if output_count == 1 else [f"output{i}" for i in range(output_count)]
        )
    else:
        output_tensor_names = main.attrs["output_tensor_names"]

    return dict(zip(output_tensor_names, out))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n self.parser.parse_args()\n\n sys.stdout.write(\"ref: %s\\n\\n\" % self.gen_ref())", "def make_reference(self):\n self.make_reference2()", "def generate(self):", "def genReferences( self, aWeb ):\n try:\n for t in self.commands:\n ref= t.ref( aWeb )\n if ref is not None:\n yield ref\n except Error as e:\n raise", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def script(self):", "def get_data():\n pass", "def use(self):", "def data(self):", "def _get_reference(self):\n super()._get_reference()\n\n # Additional object references from this env\n self.cube_body_id = self.sim.model.body_name2id(\"pot\")\n self.handle_1_site_id = self.sim.model.site_name2id(\"pot_handle_1\")\n self.handle_0_site_id = self.sim.model.site_name2id(\"pot_handle_2\")\n self.table_top_id = self.sim.model.site_name2id(\"table_top\")\n self.pot_center_id = self.sim.model.site_name2id(\"pot_center\")", "def get_data(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def call(self):", "def generate(self):\n pass", "def build(self):", "def build(self):", "def build(self):", "def run(self):\n \n pass", "def myMain(baseDir):\n \n print(\"Test the linear converter DataProxy. \")\n\n from instru import *\n\n fac = Factory(\"DataGenFactory\")\n print(\"Retrieved factory: \" + fac.name)\n \n print(\"Create module from intDataGen factory\")\n intGen = fac.select(\"int32\").create(\"intGen\")\n print(\"module \" + intGen.name + \" created (\" + intGen.internalName + \") \")\n \n print(\"Set output value to 314\")\n intGen.setParameterValue(\"value\", 314)\n \n print(\"Run module\")\n task = runModule(intGen)\n task.wait()\n \n print(\"Return value is: \" + str(intGen.outPort(\"data\").getDataValue()))\n if intGen.outPort(\"data\").getDataValue() != 314 :\n raise RuntimeError(\"Wrong return value: 314 expected. \")\n\n## print(\"Create module from dblFloatDataGen factory\")\n## dblGen = fac.select(\"dblFloat\").create(\"dblGen\")\n## print(\"module \" + dblGen.name + \" created (\" + dblGen.internalName + \") \")\n## \n## print(\"Set output value to 6.022\")\n## dblGen.setParameterValue(\"value\", 6.022)\n## \n## print(\"Run module\")\n## task = runModule(dblGen)\n## task.wait()\n## \n## print(\"Return value is: \" + str(dblGen.outPort(\"data\").getDataValue()))\n## if abs(dblGen.outPort(\"data\").getDataValue() - 6.022) > 0.01 :\n## raise RuntimeError(\"Wrong return value: 6.022 expected. \")\n\n fac = Factory(\"DemoRootFactory\")\n print(\"Retrieved factory: \" + fac.name)\n\n print(\"Create module from leafForwarder factory\")\n forwarder = fac.select(\"branch\").select(\"leafForwarder\").create(\"forwarder\")\n print(\"module \" + forwarder.name + \" created. 
\")\n\n # query the possible DataProxy class names for DataProxy creation\n proxyClasses = dataProxyClasses() # DataManager::dataProxyClasses()\n print(\"Available data proxy classes: \")\n for proxyClass in proxyClasses:\n print(\" - \" + proxyClass + \": \" + proxyClasses[proxyClass])\n \n print('Proxy creation using the constructor: DataProxy(\"LinearConverter\")')\n linearProxy = DataProxy(\"LinearConverter\") \n print(\" - Name: \" + linearProxy.name)\n print(\" - Description: \" + linearProxy.description)\n\n print(\"Bind the output of intGen to the forwarder via the proxy\")\n bind(intGen.outPort(\"data\"), forwarder.inPorts()[0], linearProxy)\n\n print(\"Run module intGen\")\n runModule(intGen)\n waitAll()\n \n print(\"Return value is: \" + str(forwarder.outPorts()[0].getDataValue()))\n if forwarder.outPorts()[0].getDataValue() != 314 :\n raise RuntimeError(\"Wrong return value: 314 expected. \")\n\n print(\"Set linear converter scaling factor to -1\")\n linearProxy.setParameterValue(\"scale\", -1)\n\n print(\"Run module intGen\")\n runModule(intGen)\n waitAll()\n \n print(\"Return value is: \" + str(forwarder.outPorts()[0].getDataValue()))\n if forwarder.outPorts()[0].getDataValue() != -314 :\n raise RuntimeError(\"Wrong return value: -314 expected. \")\n\n print(\"Set linear converter scaling factor to 1/314\")\n linearProxy.setParameterValue(\"scale\", 1./314)\n\n print(\"Run module intGen\")\n runModule(intGen)\n waitAll()\n \n print(\"Return value is: \" + str(forwarder.outPorts()[0].getDataValue()))\n if forwarder.outPorts()[0].getDataValue() != 1 :\n raise RuntimeError(\"Wrong return value: 1 expected. \")\n\n print(\"Set linear converter scaling factor to 1/3\")\n linearProxy.setParameterValue(\"scale\", 1./3)\n print(\"Set linear converter offset to -104.6\")\n linearProxy.setParameterValue(\"offset\", -104.7)\n\n print(\"Run module intGen\")\n runModule(intGen)\n waitAll()\n \n print(\"Return value is: \" + str(forwarder.outPorts()[0].getDataValue()))\n if forwarder.outPorts()[0].getDataValue() != 0 :\n raise RuntimeError(\"Wrong return value: 0 expected. 
\")\n\n print(\"End of script linearConverterTest.py\")", "def createReference(self, fromnode, tonode, edge_data='direct'):\n return super(ModuleGraph, self).createReference(fromnode, tonode, edge_data=edge_data)", "def transfer_data(self):\n pass", "def __call__(self):\n\n link = hardlink.Command(dry_run=self.dry_run, force=True)\n\n # Load config.\n try:\n with open(self.config_path) as file:\n config = yaml.load(file.read())\n except Exception:\n self._err('Unable to load config: %s' % self.config_path)\n\n # Get JSON database for processed torrent status.\n database_path = config.get('db', DATABASE_PATH)\n logger.debug('Loading database: %s' % database_path)\n try:\n db = jsondict.JsonDict(database_path, autosave=True)\n except ValueError:\n self._err('Unable to load database: %s' % database_path)\n\n # Get mapped paths.\n mapped_paths = config.get('mapped_remote_paths', {}).items()\n\n # Get Transmission client.\n host = config.get('transmission_host', 'localhost')\n port = config.get('transmission_port', 9091)\n logger.debug('Connecting to Transmission: %s:%s' % (host, port))\n client = transmission.Transmission(host=host, port=port)\n\n # Get torrents from Transmission.\n try:\n torrents = client('torrent-get', fields=[\n 'downloadDir',\n 'id',\n 'name',\n 'percentDone',\n 'secondsSeeding',\n 'uploadRatio',\n ])['torrents']\n except requests.ConnectionError:\n self._err('Unable to connect to Transmission at %s:%s' % (\n host,\n port,\n ))\n\n # Store torrents found in a known torrents directory for later\n # processing.\n found_torrents = set()\n\n # Hard link or remove completed torrents.\n for torrent in torrents:\n remote_path = os.path.join(\n torrent['downloadDir'], torrent['name'])\n\n # Get local path for remote path.\n local_path = self.get_local_path(remote_path, mapped_paths)\n\n # Find matching torrents directory.\n for dir_config in config['torrent_dirs']:\n\n # Get config for torrent directory.\n download_dir = dir_config['download_dir']\n post_processing_dir = dir_config['post_processing_dir']\n ratio = dir_config.get('ratio')\n seed_days = dir_config.get('seed_days')\n\n if local_path.startswith(download_dir):\n found_torrents.add(local_path)\n\n # Get downloaded and seeding status.\n downloaded = bool(torrent['percentDone'] == 1)\n\n # Get processed status.\n processed = bool(downloaded and db.get(remote_path))\n\n # Get seeding status.\n seeding = bool(\n ratio and\n ratio > torrent['uploadRatio'] or\n seed_days and\n seed_days * DAY_SECONDS > torrent['secondsSeeding'])\n\n # Hard link downloaded torrents to the post processing\n # directory.\n if downloaded and not processed:\n logger.info('Processing torrent: %s' % local_path)\n destination = os.path.join(\n post_processing_dir,\n os.path.relpath(local_path, download_dir),\n )\n link(local_path, destination)\n if not self.dry_run:\n db[remote_path] = True\n\n # Remove processed torrents that have finished seeding.\n elif processed and not seeding:\n if not self.remove:\n logger.debug(\n 'Not removing inactive torrent: %s' %\n torrent['name'])\n else:\n logger.info(\n 'Removing inactive torrent: %s' %\n torrent['name'])\n if not self.dry_run:\n client(\n 'torrent-remove',\n ids=[torrent['id']],\n delete_local_data=True,\n )\n del db[remote_path]\n\n else:\n # Ignore torrents that are still downloading or\n # seeding.\n logger.debug(\n 'Skipping active torrent: %s' % local_path)\n\n # Log torrent data, regardless of action taken.\n logger.debug(' - Downloaded: %d%%' % (\n torrent['percentDone'] * 100))\n if ratio:\n 
logger.debug(' - Ratio: %.2f (%s)' % (\n torrent['uploadRatio'],\n ratio,\n ))\n if seed_days:\n logger.debug(' - Seed Time: %.2f (%s) days' % (\n torrent['secondsSeeding'] / DAY_SECONDS,\n seed_days,\n ))\n\n # We found a match. No need to continue.\n break\n\n # No matching torrents directory.\n else:\n logger.debug(\n 'Skipping torrent not located in any download directory: '\n '%s' % local_path)\n\n # Process orphaned torrent data in download directories.\n for dir_config in config['torrent_dirs']:\n download_dir = dir_config['download_dir']\n post_processing_dir = dir_config['post_processing_dir']\n\n # Skip download directories that don't exist.\n if not os.path.isdir(download_dir):\n continue\n\n # Compare every file and directory in the download directory to\n # previously found torrents. Remove anything already processed and\n # not belonging to a found torrent. Hard link everything else to\n # the post processing directory.\n for path in os.listdir(download_dir):\n\n # Ignore hidden files, e.g. dot-underscore files on OS X.\n if path.startswith('.'):\n continue\n\n local_path = os.path.join(download_dir, path)\n\n # Find matching torrent for this path.\n for found_torrent in found_torrents:\n if local_path.startswith(found_torrent):\n # We found a match. No need to continue.\n break\n\n # No matching torrent.\n else:\n\n # Get remote path for local path.\n remote_path = self.get_remote_path(\n local_path, mapped_paths)\n\n # Get processed status.\n processed = db.get(remote_path, False)\n\n # Hard link orphaned files that have not been processed.\n if not processed:\n logger.info(\n 'Processing orphaned file or directory: %s' %\n local_path)\n destination = os.path.join(\n post_processing_dir,\n os.path.relpath(local_path, download_dir),\n )\n link(local_path, destination)\n # No need to add path to database, it would be removed\n # immediately in the next code block.\n\n # Remove orphaned files that have been processed.\n logger.info(\n 'Removing orphaned file or directory: %s' %\n local_path)\n if not self.dry_run:\n try:\n shutil.rmtree(local_path)\n except OSError:\n os.remove(local_path)\n # Remove path from database.\n if remote_path in db:\n del db[remote_path]\n\n # Remove stale records in database. 
Convert database keys to list to\n # avoid `RuntimeError: dictionary changed size during iteration`.\n for remote_path in list(db):\n\n # Get local path for remote path.\n local_path = self.get_local_path(remote_path, mapped_paths)\n\n # Remove from database.\n if not os.path.exists(local_path):\n logger.debug(\n 'Removing stale record from database: %s' % remote_path)\n if not self.dry_run:\n del db[remote_path]", "def get_data(self):\r\n pass", "def RUN(self):", "def get_data(self):\n pass", "def get_data(self):\n pass", "def announceGenerate(self):", "def generate():", "def codegen_reload_data():\n return {\n \"package\": u\"fn_utilities\",\n \"message_destinations\": [u\"fn_utilities\"],\n \"functions\": [u\"utilities_artifact_hash\", u\"utilities_attachment_hash\", u\"utilities_attachment_to_base64\", u\"utilities_attachment_zip_extract\", u\"utilities_attachment_zip_list\", u\"utilities_base64_to_artifact\", u\"utilities_base64_to_attachment\", u\"utilities_call_rest_api\", u\"utilities_domain_distance\", u\"utilities_email_parse\", u\"utilities_excel_query\", u\"utilities_expand_url\", u\"utilities_extract_ssl_cert_from_url\", u\"utilities_get_contact_info\", u\"utilities_json2html\", u\"utilities_parse_ssl_certificate\", u\"utilities_pdfid\", u\"utilities_resilient_search\", u\"utilities_shell_command\", u\"utilities_string_to_attachment\", u\"utilities_timer\", u\"utilities_xml_transformation\"],\n \"workflows\": [u\"example_artifact_attachment_to_base64\", u\"example_artifact_hash\", u\"example_attachment_hash\", u\"example_attachment_to_base64\", u\"example_call_rest_api\", u\"example_create_artifacts_from_excel_data\", u\"example_domain_distance\", u\"example_email_parsing_artifact\", u\"example_email_parsing_attachment\", u\"example_extract_ssl_cert_from_url\", u\"example_get_incident_contact_info\", u\"example_get_task_contact_info\", u\"example_json2html\", u\"example_parse_ssl_certificate\", u\"example_pdfid\", u\"example_resilient_search\", u\"example_shell_command\", u\"example_string_to_attachment\", u\"example_timer\", u\"example_timer_parallel\", u\"example_xml_transformation\", u\"example_zip_list\", u\"example_zip_to_artifact\", u\"utilities_expand_url\"],\n \"actions\": [u\"Example: (Artifact) Attachment to Base64\", u\"Example: Artifact Hash\", u\"Example: Attachment Hash\", u\"Example: Attachment to Base64\", u\"Example: Call REST API\", u\"Example: Domain Distance\", u\"Example: Email Parsing (Artifact)\", u\"Example: Email Parsing (Attachment)\", u\"Example: Expand URL\", u\"Example: Extract SSL Certificate\", u\"Example: Get Incident Contact Info\", u\"Example: Get Task Contact Info\", u\"Example: JSON2HTML\", u\"Example: Parse SSL Certificate\", u\"Example: PDFiD\", u\"Example: Resilient Search\", u\"Example: Shell Command\", u\"Example: String to Attachment\", u\"Example: Timer Epoch\", u\"Example: Timers in Parallel\", u\"Example: Use Excel Data\", u\"Example: XML Transformation\", u\"Example: Zip Extract\", u\"Example: Zip List\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"incident_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": [u\"Convert JSON to rich text v1.0\"],\n \"playbooks\": []\n }", "def process(self):", "def process(self):", "def process(self):", "def AcquiredData (self) :\n\t\treturn self.run(\"AcquiredData\")", "def main():\n print(dumps(get_data()))\n return 0", "def remote_getPhases():", "def main():\n insert_gateway_values(\"hermes/bin/gateways.txt\")\n return", "def _build(self):", "def 
_build(self):", "def data(self):\n pass", "def data(self):\n pass", "def fetch_data(self):", "def __call__(self):\n return self.referee()", "def target(self):", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def onSetRelayOutput(self, event):", "def get_data():\n return", "def run(self):\n\t\t\n\t\tpass", "def __generate_reference__(self, triple_map, **kwargs):\n pass", "def __generate_reference__(self, triple_map, **kwargs):\n pass", "def output_data(self):\n pass", "def codegen_reload_data():\n return {\n \"package\": u\"fn_whois_rdap\",\n \"message_destinations\": [u\"fn_whois_rdap\"],\n \"functions\": [u\"rdap_query\", u\"whois_rdap_query\"],\n \"workflows\": [u\"example_rdap_query\", u\"example_whois_query\"],\n \"actions\": [u\"Run rdap query against Artifact\", u\"Run whois query against Artifact (RDAP)\"],\n \"incident_fields\": [],\n \"incident_artifact_types\": [],\n \"datatables\": [],\n \"automatic_tasks\": [],\n \"scripts\": []\n }", "def gen_data(self,do_print=True,force_gen_inputs=False):\n\n \n if do_print:\n print\n print 'Generating corr space data, id = %s'%self.id\n \n self.post_init(force_gen_inputs=force_gen_inputs)\n self.run()\n self.post_run()", "def __init__(self):\r\n super(DataTarget, self).__init__()", "def setRef(self,reference):\n (iMod,iObj) = reference\n self.rnam.setData(struct.pack('i',iObj)[:3] + struct.pack('B',iMod))\n self.setChanged()", "def task(self):", "def task(self):", "def run(self): \r\n return", "def ref(request):\n r = referencepytest.ref(request)\n this_dir = os.path.abspath(os.path.dirname(__file__))\n r.set_data_location(os.path.join(this_dir, '..', 'reference'))\n return r", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def generate(self):\r\n raise NotImplementedError", "def main(self):", "def __init__(self,object_data, xtremio_connection):\r\n self.data = object_data\r\n self.name = object_data[\"name\"]\r\n self.href = object_data[\"href\"]\r\n self.parent_connection = xtremio_connection\r\n\r\n # Object name and type comes off the end of the href URL\r\n self.object_id = object_data[\"href\"].split(\"/\")[-1] \r\n self.object_type = object_data[\"href\"].split(\"/\")[-2]\r\n\r\n # We track the sys-id of the system the object is from, since we will\r\n # regularly need it\r\n self.initial_object_details = self.get_details()\r\n self.sys_id = self.initial_object_details[\"sys-id\"]", "def __call__(self):\n return self.generate()", "def handle(self):", "def regenerate_references():\n for circuit_type in TestDrawingMethods.circuits:\n for draw_method in TestDrawingMethods.draw_methods:\n references_dir = super()._get_resource_path(os.path.join(circuit_type),\n path=Path.CIRCUIT_DRAWERS_REFERENCES)\n\n references_dir = os.path.join(references_dir)\n if not os.path.exists(references_dir):\n os.makedirs(references_dir)\n\n reference_output = os.path.join(references_dir, draw_method)\n\n # Make underlying circuit drawer to draw chosen circuit\n circuit_drawer(TestDrawingMethods.circuits[circuit_type](),\n output=draw_method,\n filename=reference_output)", "def DM(self):", "def load_data(self):", "def run(self):\r\n pass" ]
[ "0.61367774", "0.573806", "0.56130826", "0.5543687", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.55034256", "0.5495478", "0.5490769", "0.54810923", "0.544518", "0.5430902", "0.5426326", "0.5412864", "0.5412864", "0.5412864", "0.5412864", "0.5411444", "0.54028183", "0.54028183", "0.54028183", "0.53885436", "0.5386609", "0.5363981", "0.5363981", "0.5363981", "0.5280925", "0.52734715", "0.526796", "0.52564675", "0.52492464", "0.52313054", "0.52296996", "0.5220095", "0.5220095", "0.5213029", "0.5212235", "0.52088416", "0.5206739", "0.5206739", "0.5206739", "0.5203597", "0.5203247", "0.51786363", "0.51714116", "0.5154186", "0.5154186", "0.51355195", "0.51355195", "0.51294744", "0.5115584", "0.5114236", "0.5096664", "0.5096664", "0.5096664", "0.50918895", "0.5080179", "0.50751936", "0.5070566", "0.5070566", "0.50620514", "0.5055568", "0.50525206", "0.5049376", "0.50461435", "0.5037216", "0.5037216", "0.50353837", "0.50266427", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.5017459", "0.50164014", "0.5015449", "0.50126183", "0.5012433", "0.50089645", "0.5005456", "0.5004762", "0.50012416", "0.4991074" ]
0.61549
0
A helper function to create a Relay IRModule with inputs and params from a tflite file
def create_relay_module_and_inputs_from_tflite_file(tflite_model_file, bind_params_by_name=True): with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() mod, params = convert_to_relay(tflite_model_buf, bind_params_by_name) inputs = dict() for param in mod["main"].params: name = str(param.name_hint) data_shape = [int(i) for i in param.type_annotation.shape] dtype = str(param.type_annotation.dtype) if np.issubdtype(dtype, np.floating): # Since np.random.uniform only allows the ranges of float32, # at first float16 is used and scaled afterwards, if necessary. in_min, in_max = (np.finfo("float16").min, np.finfo("float16").max) data = np.random.uniform(low=in_min, high=in_max, size=data_shape).astype(dtype) scale = np.finfo(dtype).min / np.finfo("float16").min data *= scale elif np.issubdtype(dtype, np.integer): in_min, in_max = (np.iinfo(dtype).min, np.iinfo(dtype).max) data = np.random.randint(in_min, high=in_max, size=data_shape, dtype=dtype) else: raise TypeError(f"Type {dtype} not supported") inputs[name] = data return mod, inputs, params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tflite_load_model(model_file):\n interpreter = tf.lite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def convert_to_relay(tflite_model_buf, bind_params_by_name=True):\n # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1\n try:\n import tflite.Model # pylint: disable=import-outside-toplevel\n\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n except AttributeError:\n import tflite # pylint: disable=import-outside-toplevel\n\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n mod, params = relay.frontend.from_tflite(tflite_model)\n if bind_params_by_name:\n mod[\"main\"] = relay.build_module.bind_params_by_name(mod[\"main\"], params)\n return mod, params", "def load_model(model_file):\n # Load TFLite model and allocate tensors.\n interpreter = tflite.Interpreter(model_path=model_file)\n interpreter.allocate_tensors()\n return interpreter", "def _main(args):\n # model = keras.models.load_model(args.h5_file, custom_objects={'prelu': prelu})\n with tf.keras.utils.custom_object_scope({'prelu': prelu}):\n converter = tf.lite.TFLiteConverter.from_keras_model_file(args.h5_file)\n tflite_file = converter.convert()\n open(args.tflite_file, 'wb').write(tflite_file)\n print('='*30)\n print('tffile file save in {}.'.format(args.tflite_file))", "def get_src(self):\n\n self.codegen = json.loads(self.cmod.get_source(\"json\"))\n self.sub_module_name = self.codegen[\"symbol\"]\n self.nodes = self.codegen[\"nodes\"]\n self.clml_code.append(self.MakeHeader.substitute(module=self.sub_module_name))\n\n def get_tensor_from_map(\n node_seq, shape=None, layout=\"CL_TENSOR_LAYOUT_OPTIMAL_QCOM\", dtype=\"float32\"\n ):\n if node_seq in self.node_map:\n return self.node_map[node_seq]\n else:\n node = self.nodes[node_seq]\n dtype = str(node[\"attrs\"][\"dtype\"][0][0])\n if node[\"op\"] == \"input\":\n self.clml_code.append(\"// Input Node\")\n node_out_name = self.sub_module_name + \"_\" + \"input_\" + str(node_seq)\n else:\n node_out_name = node[\"name\"]\n if shape is None:\n shape = str(tuple(node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n\n self.clml_code.append(\n self.MakeCLMLTensor.substitute(\n name=node_out_name, shape=shape, dtype=dtype, layout=layout\n )\n )\n self.clml_code.append(\n self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)\n )\n if node[\"op\"] == \"input\":\n self.clml_code.append(\n Template(\"runner.inputs.push_back($clml_input);\").substitute(\n clml_input=node_out_name\n )\n )\n self.input_meta.append(\n self.MakeInputMetaInfo.substitute(\n in_name=node_out_name, dtype=dtype, shape=shape\n )\n )\n\n if self.nodes[node_seq][\"op\"] == \"const\":\n self.clml_code.append(\n Template('runner.consts.push_back(\"$nid\");').substitute(nid=node[\"name\"])\n )\n self.node_map[node_seq] = node_out_name\n return node_out_name\n\n def make_output_tensor(\n node, node_seq, shape=None, layout=\"CL_TENSOR_LAYOUT_OPTIMAL_QCOM\", dtype=\"float32\"\n ):\n if dtype is None:\n dtype = str(node[\"attrs\"][\"dtype\"][0][0])\n if shape is None:\n shape = str(tuple(node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n node_out_name = self.sub_module_name + \"_\" + \"layer_out_\" + str(node_seq)\n self.clml_code.append(\n self.MakeCLMLTensor.substitute(\n name=node_out_name,\n shape=shape,\n dtype=dtype,\n layout=layout,\n )\n )\n return node_out_name\n\n for node_seq, node in enumerate(self.nodes):\n if node[\"op\"] == 
\"kernel\":\n self.clml_code.append(\"// Kernel Node : \" + node[\"name\"])\n if node[\"name\"] == \"nn.conv2d\" or node[\"name\"] == \"nn.depthwise_conv2d\":\n if \"padding\" in node[\"attrs\"]:\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"padding\"][0]))[1:-1]\n else:\n padding = \"0, 0, 0, 0\"\n dilation = str(tuple(int(x) for x in node[\"attrs\"][\"dilation\"][0]))[1:-1]\n strides = str(tuple(int(x) for x in node[\"attrs\"][\"strides\"][0]))[1:-1]\n groups = node[\"attrs\"][\"groups\"][0][0]\n if node[\"name\"] == \"nn.conv2d\":\n mode = \"CL_CONVOLUTION_MODE_CONVOLUTION_QCOM\"\n else:\n mode = \"CL_CONVOLUTION_MODE_DEPTHWISE_QCOM\"\n activation = \"CL_ACTIVATION_RELU\"\n has_act = False\n if \"activation_type\" in node[\"attrs\"]:\n has_act = True\n activation = node[\"attrs\"][\"activation_type\"][0][0]\n if activation == \"relu\":\n activation = \"CL_ACTIVATION_RELU\"\n elif activation == \"relu6\":\n activation = \"CL_ACTIVATION_RELU6\"\n else:\n RuntimeError(\"Unknown activation:\" + activation)\n has_bias = bool((node[\"inputs\"] == 3) or (node[\"inputs\"] == 7))\n has_bn = bool((node[\"inputs\"] == 6) or (node[\"inputs\"] == 7))\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n weight_tensor = get_tensor_from_map(node[\"inputs\"][1][0])\n if not has_bias:\n bias_tensor = \"runner.unusedTensor\"\n else:\n bias_tensor = get_tensor_from_map(node[\"inputs\"][2][0])\n\n node_out_name = make_output_tensor(node, node_seq)\n\n if not has_bn:\n self.clml_code.append(\n self.MakeConv2D.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n bias_tensor=bias_tensor,\n output_tensor=node_out_name,\n padding=padding,\n dilation=dilation,\n strides=strides,\n groups=groups,\n mode=mode,\n activation=activation,\n has_bias=\"true\" if has_bias else \"false\",\n has_act=\"true\" if has_act else \"false\",\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n else:\n bn_index = 3 if has_bias else 2\n bn_attrs = tuple(node[\"attrs\"][\"batchnorm\"][0][0])\n axis = bn_attrs[0]\n bn_shape = [1, 1, 1, 1]\n bn_node = self.nodes[node[\"inputs\"][bn_index][0]]\n bn_shape[axis] = bn_node[\"attrs\"][\"shape\"][0][0]\n dtype = bn_node[\"attrs\"][\"dtype\"][0][0]\n\n bn_scale_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_bias_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 1][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_mean_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 2][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n bn_var_tensor = get_tensor_from_map(\n node[\"inputs\"][bn_index + 3][0],\n shape=str(tuple(bn_shape))[1:-1],\n dtype=dtype,\n )\n\n self.clml_code.append(\n self.MakeConv2DWithBN.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n bias_tensor=bias_tensor,\n output_tensor=node_out_name,\n bn_scale_tensor=bn_scale_tensor,\n bn_bias_tensor=bn_bias_tensor,\n bn_mean_tensor=bn_mean_tensor,\n bn_var_tensor=bn_var_tensor,\n bn_attrs=str(bn_attrs)[1:-1],\n padding=padding,\n dilation=dilation,\n strides=strides,\n groups=groups,\n mode=mode,\n activation=activation,\n has_bias=\"true\" if has_bias else \"false\",\n has_act=\"true\" if has_act else \"false\",\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.relu6\" or node[\"name\"] == \"nn.relu\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n 
relu_type = (\n \"CL_ACTIVATION_RELU\" if node[\"name\"] == \"nn.relu\" else \"CL_ACTIVATION_RELU6\"\n )\n self.clml_code.append(\n self.MakeRelu.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n relu_type=relu_type,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.batch_norm\":\n bn_attrs = tuple(node[\"attrs\"][\"batchnorm\"][0][0])\n axis = bn_attrs[0]\n bn_shape = [1, 1, 1, 1]\n bn_node = self.nodes[node[\"inputs\"][0][0]]\n bn_shape[axis] = bn_node[\"attrs\"][\"shape\"][0][0]\n dtype = bn_node[\"attrs\"][\"dtype\"][0][0]\n bn_scale_tensor = get_tensor_from_map(\n node[\"inputs\"][0][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_bias_tensor = get_tensor_from_map(\n node[\"inputs\"][1][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_mean_tensor = get_tensor_from_map(\n node[\"inputs\"][2][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n bn_var_tensor = get_tensor_from_map(\n node[\"inputs\"][3][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype\n )\n\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n\n self.clml_code.append(\n self.MakeBN.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n bn_scale_tensor=bn_scale_tensor,\n bn_bias_tensor=bn_bias_tensor,\n bn_mean_tensor=bn_mean_tensor,\n bn_var_tensor=bn_var_tensor,\n bn_attrs=str(bn_attrs)[1:-1],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\"nn.max_pool2d\", \"nn.avg_pool2d\", \"nn.l2_pool2d\"]:\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n pool_size = str(tuple(int(x) for x in node[\"attrs\"][\"pool_size\"][0]))[1:-1]\n strides = str(tuple(int(x) for x in node[\"attrs\"][\"strides\"][0]))[1:-1]\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"padding\"][0]))[1:-1]\n self.clml_code.append(\n self.MakePool2D.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n pool_type=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\"nn.global_max_pool2d\", \"nn.global_avg_pool2d\"]:\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n in_node = self.nodes[node[\"inputs\"][0][0]]\n in_shape = str(tuple(in_node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n self.clml_code.append(\n self.MakeGlobalPool2D.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n in_shape=in_shape,\n pool_type=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"reshape\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeReshape.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"concatenate\":\n input_len = len(node[\"inputs\"])\n in_list = str(\n [get_tensor_from_map(node[\"inputs\"][x][0]) for x in range(input_len)]\n )[1:-1]\n node_out_name = make_output_tensor(node, node_seq)\n axis = node[\"attrs\"][\"axis\"][0][0]\n self.clml_code.append(\n self.MakeConcatenate.substitute(\n in_list=in_list,\n output_tensor=node_out_name,\n axis=axis,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.dense\":\n in_node = 
self.nodes[node[\"inputs\"][0][0]]\n in_shape = tuple(in_node[\"attrs\"][\"shape\"][0][0])\n wt_shape = tuple(in_node[\"attrs\"][\"shape\"][0][0])\n input_tensor = get_tensor_from_map(\n node[\"inputs\"][0][0], layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\"\n )\n weight_tensor = get_tensor_from_map(\n node[\"inputs\"][1][0],\n shape=str(tuple([1, 1, wt_shape[0], wt_shape[1]]))[1:-1],\n layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\",\n )\n node_out_name = make_output_tensor(\n node,\n node_seq,\n shape=str(tuple([in_shape[0], wt_shape[0], 1, 1]))[1:-1],\n layout=\"CL_TENSOR_LAYOUT_NCHW_QCOM\",\n )\n self.clml_code.append(\n self.MakeDense.substitute(\n input_tensor=input_tensor,\n weight_tensor=weight_tensor,\n output_tensor=node_out_name,\n in_shape=str(in_shape)[1:-1],\n wt_shape=str(wt_shape)[1:-1],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.softmax\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeSoftMax.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.pad\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n pad_mode = node[\"attrs\"][\"pad_mode\"][0][0]\n padding = str(tuple(int(x) for x in node[\"attrs\"][\"pad_width\"][0]))[1:-1]\n self.clml_code.append(\n self.MakePad.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n pad_mode=pad_mode,\n padding=padding,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"nn.batch_flatten\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeBatchFlatten.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] == \"clip\":\n input_tensor = get_tensor_from_map(node[\"inputs\"][0][0])\n node_out_name = make_output_tensor(node, node_seq)\n a_max = node[\"attrs\"][\"a_max\"][0][0]\n a_min = node[\"attrs\"][\"a_min\"][0][0]\n self.clml_code.append(\n self.MakeClip.substitute(\n input_tensor=input_tensor,\n output_tensor=node_out_name,\n a_max=a_max,\n a_min=a_min,\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n elif node[\"name\"] in [\n \"add\",\n \"subtract\",\n \"multiply\",\n \"minimum\",\n \"maximum\",\n \"divide\",\n ]:\n input_a = get_tensor_from_map(node[\"inputs\"][0][0])\n input_b = get_tensor_from_map(node[\"inputs\"][1][0])\n node_out_name = make_output_tensor(node, node_seq)\n self.clml_code.append(\n self.MakeBinaryOp.substitute(\n input_a=input_a,\n input_b=input_b,\n output_tensor=node_out_name,\n op=node[\"name\"],\n dtype=node[\"attrs\"][\"dtype\"][0][0],\n )\n )\n else:\n RuntimeError(\"Unsupported Op:\" + node[\"name\"])\n self.clml_code.append(\n self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)\n )\n self.node_map[node_seq] = node_out_name\n\n elif node[\"op\"] not in [\"const\", \"input\"]:\n print(\"Unknown Node type:\", node[\"op\"])\n\n # Populate outputs\n out_nodes = self.codegen[\"heads\"]\n self.clml_code.append(\"// Populate outputs\")\n for nid_triple in out_nodes:\n nid = nid_triple[0]\n out_node = self.nodes[nid]\n dtype = str(out_node[\"attrs\"][\"dtype\"][0][0])\n shape = str(tuple(out_node[\"attrs\"][\"shape\"][0][0]))[1:-1]\n out_name = self.sub_module_name + \"_\" + 
\"layer_out_\" + str(nid)\n self.clml_code.append(\n Template(\n 'runner.outputs.insert({\"$out_name\", runner.storage_map[\"$out_name\"]});'\n ).substitute(out_name=out_name)\n )\n self.clml_code.append(\n Template('runner.outputs_dtypes.insert({\"$out_name\", \"$dtype\"});').substitute(\n out_name=out_name, dtype=dtype\n )\n )\n self.clml_code.append(\n Template(\n \"runner.outputs_shapes.insert\" '({\"$out_name\", std::vector<size_t>({$shape})});'\n ).substitute(out_name=out_name, shape=shape)\n )\n self.output_meta.append(\n self.MakeOutputMetaInfo.substitute(out_name=out_name, dtype=dtype, shape=shape)\n )\n\n # Mem allocation & Param copy\n self.clml_code.append(\"// Allocate Tensor Memory and copy params\")\n self.clml_code.append(\"runner.AllocateMemAndPopulateParams();\")\n\n # Meta data preparation\n self.clml_code.append(\n self.MakeMetaInfo.substitute(\n name=self.sub_module_name,\n input_count=len(self.input_meta),\n output_count=len(self.output_meta),\n input_meta=\"\\\\\\n\".join(self.input_meta),\n output_meta=\"\\\\\\n\".join(self.output_meta),\n )\n )\n\n self.clml_code.append(self.MakeFooter.substitute())\n return (self.sub_module_name, self.clml_code)", "def from_tensorflow(self, graph, layout=\"NHWC\", shape=None, outputs=None):\n func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)\n self._mod[\"main\"] = func\n return self._mod, self._params", "def from_tflite(model, prog_name): #, shape_dict, dtype_dict):\n try:\n import tflite.Model\n import tflite.SubGraph\n import tflite.BuiltinOperator\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n assert isinstance(model, tflite.Model.Model)\n\n # keep the same as tflite\n assert model.SubgraphsLength() == 1, \"only support one subgraph (main subgraph)\"\n subgraph = model.Subgraphs(0)\n\n # model inputs / outputs\n model_inputs = subgraph.InputsAsNumpy()\n model_outputs = subgraph.OutputsAsNumpy()\n assert model_inputs.size == 1, \"Model should have only one input\"\n assert model_outputs.size == 1, \"Model should have only one output\"\n\n # op code in model\n op_converter = OperatorConverter(model, subgraph, prog_name)\n op_converter.is_dequantize = False\n op_converter.check_unsupported_ops()\n\n in_tensor = op_converter.get_tensors(model_inputs)[0]\n out_tensor = op_converter.get_tensors(model_outputs)[0]\n\n op_converter.define_model_sizes(\"IN\", in_tensor)\n op_converter.define_model_sizes(\"OUT\", out_tensor)\n\n op_converter.nn_add_input(in_tensor)\n\n output_nodes = op_converter.convert_op_to_hexagon_nn()\n\n op_converter.nn_add_output(output_nodes)\n\n op_converter.print_nn_nodes()\n\n print(\"tensor_tab:\")\n print(op_converter.tensor_tab)\n\n op_converter.close()\n print(\"Converted Hexagon Model saved to {}\".format(op_converter.h_file.name))", "def _get_model(self, model_path='model.tflite'):\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n return interpreter", "def resnext34(**kwargs):\n model = ResNeXt(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def resnext18( **kwargs):\n model = ResNeXt(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def compile_ir(engine, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n # Now add the module and make sure it is ready for execution\n engine.add_module(mod)\n engine.finalize_object()\n return mod", "def __init__(self, model_name: str, label_file: str) -> None:\n\n # Append TFLITE extension to 
model_name if there's no extension\n _, ext = os.path.splitext(model_name)\n if not ext:\n model_name += '.tflite'\n\n # Initialize the TFLite model.\n interpreter = Interpreter(model_path=model_name, num_threads=4)\n interpreter.allocate_tensors()\n\n self._input_index = interpreter.get_input_details()[0]['index']\n self._output_index = interpreter.get_output_details()[0]['index']\n self._interpreter = interpreter\n\n self.pose_class_names = self._load_labels(label_file)", "def read_module(stream):\n data = stream.read()\n if len(data) % 4 != 0:\n raise ParseError('File length is not divisible by 4')\n words = array.array('I', data)\n binary = SpirvBinary(words)\n\n module = ir.Module()\n module.value_to_id = {}\n try:\n parse_global_instructions(binary, module)\n parse_functions(binary, module)\n return module\n finally:\n del module.value_to_id", "def file_based_input_fn_builder(input_file):\n # 存放解析自TFRecord文件的数据\n name_to_features = {\n \"input_q\":tf.FixedLenFeature([shape],tf.int64),\n \"input_K\":tf.FixedLenFeature([],tf.float32),\n \"input_v\":tf.FixedLenFeature([],tf.float32),\n }\n\n def _decode_record(record,name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record,name_to_features)\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size = 100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record:_decode_record(record, name_to_features),\n batch_size = batch_size,\n drop_remainder=drop_remainder))\n return d\n return input_fn", "def create_model_from_file(\n weights_fn: Path, gpu: bool = True, device_num: int = 0,\n) -> Tuple[torch.nn.Module, int, dict]:\n if gpu:\n map_location = f\"cuda:{device_num}\"\n else:\n map_location = \"cpu\"\n weights_fn = weights_fn.resolve()\n logging.info(\"Loading model dictionary from file.\")\n model_dict = torch.load(weights_fn, map_location=map_location)\n model = create_model_on_device(device_num, model_dict[\"model_struc_dict\"])\n logging.info(\"Loading in the saved weights.\")\n model.load_state_dict(model_dict[\"model_state_dict\"])\n return model, model_dict[\"model_struc_dict\"][\"classes\"], model_dict[\"label_codes\"]", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, 
[int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def load_module(id=None, datatype=None, action=None,\n version='0.0', fields=[]):\n\n icon = {\n 'URI': config.IMAGES + \"load.png\",\n 'terminals': {\n 'output': (20, 10, 1, 0),\n }\n }\n \n terminals = [\n dict(id='output',\n datatype=datatype,\n use='out',\n description='data',\n ),\n ]\n\n files_field = {\n \"type\":\"[file]\",\n \"label\": \"Files\",\n \"name\": \"files\",\n \"value\": '',\n }\n intent_field = {\n \"type\":\"string\",\n \"label\":\"Intent\",\n \"name\": \"intent\",\n \"value\": '',\n }\n \n # Combine everything into a module.\n module = Module(id=id,\n name='Load',\n version=version,\n description=action.__doc__,\n #icon=icon,\n terminals=terminals,\n fields=[files_field, intent_field] + fields,\n action=action,\n )\n\n return module", "def create_model(hparams, mode):\n\n graph = tf.Graph()\n\n with graph.as_default():\n with tf.name_scope(\"input_pipe\"):\n dataset = create_dataset(hparams, mode)\n iterator = dataset.make_initializable_iterator()\n model = LMandBDRNNModel(hparams=hparams,\n iterator=iterator,\n mode=mode)\n\n sess = tf.Session(graph=graph)\n\n modeltuple = ModelTuple(graph=graph, iterator=iterator,\n model=model, session=sess)\n\n return modeltuple", "def create_graph():\n with gfile.FastGFile(os.path.join(\n FLAGS.model_dir, FLAGS.model_name), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def build_model(cls, 
args, task):\n\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = getattr(\n args, \"tokens_per_sample\", DEFAULT_MAX_TARGET_POSITIONS\n )\n\n if args.character_embeddings:\n embed_tokens = CharacterTokenEmbedder(\n task.source_dictionary,\n eval(args.character_filters),\n args.character_embedding_dim,\n args.decoder_embed_dim,\n args.char_embedder_highway_layers,\n )\n elif args.adaptive_input:\n embed_tokens = AdaptiveInput(\n len(task.source_dictionary),\n task.source_dictionary.pad(),\n args.decoder_input_dim,\n args.adaptive_input_factor,\n args.decoder_embed_dim,\n options.eval_str_list(args.adaptive_input_cutoff, type=int),\n args.quant_noise_pq,\n args.quant_noise_pq_block_size,\n )\n else:\n embed_tokens = cls.build_embedding(\n args, task.source_dictionary, args.decoder_input_dim\n )\n\n if args.tie_adaptive_weights:\n assert args.adaptive_input\n assert args.adaptive_input_factor == args.adaptive_softmax_factor\n assert (\n args.adaptive_softmax_cutoff == args.adaptive_input_cutoff\n ), \"{} != {}\".format(\n args.adaptive_softmax_cutoff, args.adaptive_input_cutoff\n )\n assert args.decoder_input_dim == args.decoder_output_dim\n\n decoder = LinearTransformerDecoder(\n args, task.target_dictionary, embed_tokens, no_encoder_attn=True\n )\n return cls(decoder)", "def from_program(self, program, shape_dict, scope):\n\n self.shape_dict = shape_dict\n if scope is None:\n import paddle\n\n scope = paddle.fluid.global_scope()\n self.check_unsupported_ops(program)\n self.extract_parameters(program, scope)\n self.ops_to_relay(program)\n\n output_names = list()\n for block in program.blocks:\n for op in block.ops:\n if op.type == \"fetch\":\n output_names.append(op.input(\"X\")[0])\n\n outputs = [self.nodes[name] for name in output_names]\n outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)\n\n free_vars = analysis.free_vars(outputs)\n func = _function.Function(free_vars, outputs)\n mod = IRModule.from_expr(func)\n return mod, self.params", "def test_forward_mobilenet_v1(accel_type=\"ethos-u55-256\"):\n np.random.seed(23)\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/\"\n \"models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n \"mobilenet_v1_1.0_224_quant.tflite\",\n )\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n input_tensor = \"input\"\n input_dtype = \"uint8\"\n input_shape = (1, 224, 224, 3)\n in_min, in_max = util.get_range_for_dtype_str(input_dtype)\n input_data = np.random.randint(in_min, high=in_max, size=input_shape, dtype=input_dtype)\n\n relay_mod, params = convert_to_relay(tflite_model_buf, input_data, \"input\")\n input_data = {input_tensor: input_data}\n output_data = generate_ref_data(relay_mod, input_data)\n\n mod = partition_for_ethosu(relay_mod, params)\n compiled_models = infra.build_source(\n mod, input_data, output_data, accel_type, output_tolerance=10\n )\n infra.verify_source(compiled_models, accel_type)", "def __compile_ir(self):\n self.builder.ret_void()\n llvm_ir = str(self.module)\n mod = self.binding.parse_assembly(llvm_ir)\n mod.verify()\n\n self.engine.add_module(mod)\n self.engine.finalize_object()\n self.engine.run_static_constructors()\n return mod", "def _make_model_v2():\n class 
CustomModule(tf.Module):\n\n def __init__(self):\n super().__init__()\n self.m = tf.Variable([1.0, 1.0, 1.0], name='slope')\n\n @tf.function\n def __call__(self, x):\n y = self.m * x + 1\n return y\n\n @tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])\n def length(self, x):\n return tf.reduce_sum(self(x) - x, name='length')\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32),\n tf.TensorSpec((None, 3), tf.float32)])\n def scalar_multiply(self, z, x):\n return tf.multiply(z, x, name='scale_mult')\n\n module = CustomModule()\n\n # Make a concrete version of __call__\n call = module.__call__.get_concrete_function(tf.TensorSpec((None, 3)))\n\n tf.saved_model.save(\n module, tf_export_path, signatures={\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: call,\n 'length': module.length,\n 'scalar_multiply': module.scalar_multiply\n }\n )", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(sys.argv[1], 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_inputs(params):\n sess = tf.Session()\n\n lr_images, hr_labels = [], []\n training_dir = params['training_dir'].format(params['ratio'])\n\n # Raise exception if user has not ran prepare_data.py yet\n if not os.path.isdir(training_dir):\n raise Exception(\"You must first run prepare_data.py before you can train\")\n\n lr_shape = (params['lr_size'], params['lr_size'], 3)\n hr_shape = output_shape = (params['lr_size'] - params['edge'], params['lr_size'] - params['edge'], 3 * params['ratio']**2)\n for file in os.listdir(training_dir):\n train_file = open(\"{}/{}\".format(training_dir, file), \"rb\")\n train_data = np.fromfile(train_file, dtype=np.uint8)\n\n lr_image = train_data[:17 * 17 * 3].reshape(lr_shape)\n lr_images.append(lr_image)\n\n hr_label = train_data[17 * 17 * 3:].reshape(hr_shape)\n hr_labels.append(hr_label)\n\n return lr_images, hr_labels", "def create_module(sbml_model_file, model_name, model_output_dir, condition_df,\n observable_df):\n\n from amici.petab_import import import_model\n import_model(sbml_model=sbml_model_file, observable_table=observable_df,\n model_name=model_name, model_output_dir=model_output_dir,\n verbose=True, condition_table=condition_df)", "def compile_IR(ir):\n triple = re.search(\n r'target\\s+triple\\s*=\\s*\"(?P<triple>[-\\d\\w\\W_]+)\"\\s*$',\n ir, re.M).group('triple')\n\n # Create execution engine\n target = llvm.Target.from_triple(triple)\n target_machine = target.create_target_machine()\n backing_mod = llvm.parse_assembly(\"\")\n engine = llvm.create_mcjit_compiler(backing_mod, target_machine)\n\n # Create LLVM module and compile\n mod = llvm.parse_assembly(ir)\n mod.verify()\n engine.add_module(mod)\n engine.finalize_object()\n engine.run_static_constructors()\n\n return engine", "def run_module(args, module_path, workspace, module_data):\n\n mod_path = module_path.replace('./', '')\n curr_path = os.getcwd()\n tfvar_path = module_path.replace('./components/', '')\n print(\"curr_path = {0}\".format(curr_path))\n print(\"DEBUG module_path = {0}\".format(module_path))\n module_name = module_path.split('/')[-1]\n print(\"DEBUG module_name = {0}\".format(module_name))\n\n key_config = \"\\\"key={0}/terraform.tfstate\\\"\".format(module_name)\n bucket_region_config = \"\\\"region={0}\\\"\".format(module_data[\"bucket_region\"])\n bucket_config = \"\\\"bucket={0}\\\"\".format(module_data[\"bucket\"])\n dynamodb_config = 
\"\\\"dynamodb_table={0}\\\"\".format(module_data[\"dynamodb\"])\n\n plan_output_file = \"plan.out\"\n tf_varfile = f\"{curr_path}/tfvars/{tfvar_path}/{workspace}.tfvars\"\n tf_varfile_common = f\"{curr_path}/tfvars/terraform.tfvars\"\n tf_varfile_tags = f\"{curr_path}/tfvars/core/taggings/{workspace}.tfvars\"\n backend_override = f\"{curr_path}/variables/config/backend_override.tf\"\n providers_override = f\"{curr_path}/variables/config/providers_override.tf\"\n\n softlinking_files(mod_path)\n\n remove_prev_run = f\"cd {module_path} && rm -f {plan_output_file} && rm -rf .terraform\"\n cp_override_cmd = f\"cd {module_path} && cp {backend_override} . && cp {providers_override} .\"\n\n tf_plan_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -out {plan_output_file} --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags}\"\n tf_plan_destroy_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -destroy --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags} -out {plan_output_file}\"\n tf_apply_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform apply {plan_output_file}\"\n tf_init_cmd = f\"cd {module_path} && terraform init --backend-config={key_config} --backend-config={bucket_region_config} --backend-config={dynamodb_config} --backend-config={bucket_config} && terraform workspace new {workspace} || terraform workspace select {workspace}\"\n print(tf_init_cmd) # let's leave this in\n\n os.system(remove_prev_run)\n os.system(cp_override_cmd)\n os.system(tf_init_cmd)\n\n if args.action.lower() == 'plan':\n # always auto approve 'plan' action\n os.system(tf_plan_cmd)\n elif args.action.lower() == 'plan-destroy':\n # always auto approve 'plan' action\n os.system(tf_plan_destroy_cmd)\n elif args.action.lower() == 'apply':\n if args.approve:\n # auto-approve flag enabled so skip user confirmation\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n os.system(tf_plan_cmd)\n # confirm with user first\n if user_confirmation(\"Sure you want to APPLY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")\n elif args.action.lower() == 'apply-destroy':\n if args.approve:\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n # confirm with user first\n os.system(tf_plan_destroy_cmd)\n if user_confirmation(\"Sure you want to APPLY DESTROY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")", "def _create_string_input_trainable_model():\n\n class BlockWithStringInputs(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.cast = onnxblock.blocks.Cast(to=onnx.TensorProto.FLOAT)\n self.linear = onnxblock.blocks.Linear(4, 2)\n\n def build(self, string_input):\n return self.linear(self.cast(string_input))\n\n string_block = BlockWithStringInputs()\n with onnxblock.empty_base() as model_accessor:\n model_accessor.model.graph.input.extend(\n [\n onnx.helper.make_tensor_value_info(\"input\", onnx.TensorProto.STRING, [1, 4]),\n ]\n )\n _ = string_block(\"input\")\n\n return string_block.to_model_proto()", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return 
model", "def __init__(\n self,\n module: Union[module_utils.CompiledModule, None],\n function: Union[Callable[[TracedModule], None], None],\n _load_dict: Optional[Dict[str, Any]] = None,\n ):\n if _load_dict is None:\n # Extract metadata from module and function.\n self.module_name = module.module_name\n self.compiled_paths = module.compiled_paths\n self.backend_name = module.backend_info.backend_name\n self.backend_id = module.backend_info.backend_id\n self.backend_driver = module.backend_info.driver\n self.iree_serializable = module.iree_serializable()\n self.tflite_serializable = module.tflite_serializable()\n self.function_name = function.__name__\n self.function_sourcefile = inspect.getsourcefile(function)\n source, start_line = inspect.getsourcelines(function)\n self.function_line_numbers = (start_line, start_line + len(source))\n self.function_source = \"\".join(source)\n\n self.calls = []\n else:\n self.module_name = _load_dict[\"module_name\"]\n self.compiled_paths = _load_dict[\"compiled_paths\"]\n self.backend_name = _load_dict[\"backend_name\"]\n self.backend_id = _load_dict[\"backend_id\"]\n self.backend_driver = _load_dict[\"backend_driver\"]\n self.iree_serializable = _load_dict[\"iree_serializable\"]\n self.tflite_serializable = _load_dict[\"tflite_serializable\"]\n self.function_name = _load_dict[\"function_name\"]\n self.function_sourcefile = _load_dict[\"function_sourcefile\"]\n self.function_line_numbers = _load_dict[\"function_line_numbers\"]\n self.function_source = _load_dict[\"function_source\"]\n self.calls = _load_dict[\"calls\"]", "def _init_from_file(self,params,weights_dict):\n\n self.name = params[keys._name]\n self.topology = params[keys._topology]\n self.learningRate = params[keys._learning_rate]\n self.momentum = params[keys._momentum]\n #self._outActiv_fun_key = params[keys._output_activation]\n #self._hiddenActiv_fun_key = params[keys._hidden_activation]\n #self.output_activation = self.set_outActivation_fun(func=self._outActiv_fun_key)\n #self.hidden_activation = self.set_hiddenactivation_fun(func=self._hiddenActiv_fun_key)\n\n #unpack weights\n self.weights = [weights_dict[layer_mat] for layer_mat in weights_dict]\n self.size = len(self.weights)\n self.Gradients = [None]*self.size", "def mtf_bitransformer_base():\n hparams = mtf_transformer2_base()\n hparams.max_length = 256\n hparams.shared_embedding = True\n # HYPERPARAMETERS FOR THE LAYER STACKS\n hparams.add_hparam(\"encoder_layers\", [\"self_att\", \"drd\"] * 6)\n hparams.add_hparam(\"decoder_layers\", [\"self_att\", \"enc_att\", \"drd\"] * 6)\n hparams.add_hparam(\"encoder_num_layers\", 6)\n hparams.add_hparam(\"decoder_num_layers\", 6)\n # number of heads in multihead attention\n hparams.add_hparam(\"encoder_num_heads\", 8)\n hparams.add_hparam(\"decoder_num_heads\", 8)\n hparams.add_hparam(\"local_attention_radius\", 128)\n\n # default of 0 for standard transformer behavior\n # 1 means a single set of keys and values that are read by all query heads\n hparams.add_hparam(\"encoder_num_memory_heads\", 0)\n hparams.add_hparam(\"decoder_num_memory_heads\", 0)\n # share attention keys and values\n hparams.add_hparam(\"encoder_shared_kv\", False)\n hparams.add_hparam(\"decoder_shared_kv\", False)\n\n # Parameters for computing the maximum decode length in beam search.\n # Maximum decode length is:\n # min(max_length,\n # decode_length_multiplier * input_length + decode_length_constant)\n hparams.add_hparam(\"decode_length_multiplier\", 1.5)\n hparams.add_hparam(\"decode_length_constant\", 10.0)\n # 
used during decoding\n hparams.add_hparam(\"alpha\", 0.6)\n hparams.sampling_temp = 0.0\n return hparams", "def build_and_run(mod: IRModule, target: Target, dev_type: str) -> np.ndarray:\n rt_mod = tvm.build(mod, target=target)\n return run_module_via_rpc(\n rpc_config=rpc_config,\n lib=rt_mod,\n dev_type=dev_type,\n args={i: v for i, v in enumerate(inputs)}, # pylint: disable=unnecessary-comprehension\n continuation=create_calculator(backend=\"tir\"),\n backend=\"tir\",\n )", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def generate_ref_data(mod, input_data, params=None, target=\"llvm\"):\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lib = relay.build(mod, target=target, params=params)\n\n lib_name = \"mod.so\"\n temp = utils.tempdir()\n lib_path = temp.relpath(lib_name)\n lib.export_library(lib_path)\n lib = tvm.runtime.load_module(lib_path)\n grt_mod = graph_executor.GraphModule(lib[\"default\"](tvm.cpu()))\n grt_mod.set_input(**input_data)\n grt_mod.run()\n output_count = grt_mod.get_num_outputs()\n out = [grt_mod.get_output(i).numpy() for i in range(output_count)]\n if isinstance(mod, tvm.relay.Function):\n main = mod\n else:\n main = mod[\"main\"]\n if main.attrs is None or main.attrs[\"output_tensor_names\"] is None:\n output_tensor_names = (\n [\"output\"] if output_count == 1 else [f\"output{i}\" for i in range(output_count)]\n )\n else:\n output_tensor_names = main.attrs[\"output_tensor_names\"]\n\n return dict(zip(output_tensor_names, out))", "def create_graph():\n\t# Creates graph from saved graph_def.pb.\n\twith tf.gfile.FastGFile('./models/inception_v4.pb', 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\t_ = tf.import_graph_def(graph_def, name='')", "def convert_from_frozen_graph():\n input_arrays = [\"input\"]\n converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(\n graph_def_file='/media/main/Data/Task/RPiCigDetector/utils/test_model/frozen_inference_graph.pb',\n # both `.pb` and `.pbtxt` files are accepted.\n input_arrays=['input'],\n input_shapes={'input': [1, 224, 224, 3]},\n output_arrays=['MobilenetV1/Predictions/Softmax']\n )\n converter.allow_custom_ops = True\n # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_type = tf.lite.constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {input_arrays[0]: (128, 128)}\n tflite_model = 
converter.convert()\n\n # Save the model.\n with open('model.tflite', 'wb') as f:\n f.write(tflite_model)", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "def cmsisnn_compiler(relay_func):\n mod = tvm.IRModule()\n mod[\"main\"] = relay_func\n mod = relay.transform.InferType()(mod)\n func_name = relay_func.attrs[\"global_symbol\"]\n tir_mod = relay_to_tir(func_name, mod[\"main\"])\n cmsisnn_runtime = tvm._ffi.get_global_func(\"runtime.module.cmsisnn.create\")\n return cmsisnn_runtime(tir_mod)", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(modelFullPath, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def resnet34(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def Translate(self):\n mojom_file = self._graph.files[self._file_name]\n\n mod = self._module\n self.PopulateModuleMetadata(mod, mojom_file)\n\n mod.imports = []\n if mojom_file.imports:\n mod.imports = [self.ImportFromMojom(imp) for imp in mojom_file.imports]\n # When translating an imported type, its SourceFileInfo.file_name is a key\n # into self._imports. The value is the module from which the type was\n # imported.\n self._imports = {imp['module'].path: imp for imp in mod.imports}\n\n if mojom_file.declared_mojom_objects:\n if mojom_file.declared_mojom_objects.top_level_constants:\n mod.constants = [\n self.ConstFromMojom(\n self._graph.resolved_values[key].declared_constant, None)\n for key in mojom_file.declared_mojom_objects.top_level_constants]\n\n user_defined_types = ['interfaces', 'structs', 'unions']\n for user_defined_type in user_defined_types:\n if getattr(mojom_file.declared_mojom_objects, user_defined_type):\n setattr(mod, user_defined_type, [self.UserDefinedFromTypeKey(key)\n for key in getattr(\n mojom_file.declared_mojom_objects, user_defined_type)])\n if mojom_file.declared_mojom_objects.top_level_enums:\n mod.enums = [self.UserDefinedFromTypeKey(key)\n for key in mojom_file.declared_mojom_objects.top_level_enums]\n\n return mod", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n\n return model", "def __init__(self, target, device_id):\n device_id = int(device_id)\n self.context = tvm.ndarray.context(target, device_id)\n if target == 'cpu':\n target = 'llvm'\n self.target = target\n if not self.context.exist:\n raise RuntimeError(\"No hardware to support selected target \"\n f\"'{target}' on device {device_id}\")\n self.compiler = compiler\n self.to_backend_value = RelayInputConverter(self.context)\n self.from_backend_value = RelayOutputConverter()", "def model_wrapper(self):\n original = self.args.rnn_type\n 
if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def parse(filename):\n\n # Copy the content from given file to a local list\n with open(filename, 'r') as fp:\n content = [line for line in (line.strip() for line in fp) if line]\n\n # Initialize a dictionary to store the parsed data\n data = {\n 'module_name': '',\n 'input': [],\n 'output': [],\n 'wire': [],\n 'reg': [],\n 'connections': []\n }\n\n # Get module name\n if 'module' in content[0][:7]:\n data['module_name'] = re.search(r'e.*\\(', content[0]).group()[1:-1].strip()\n else:\n print(\"Module name not present!\")\n exit(0)\n\n try:\n for line in content[1:-1]:\n # Get module parameters\n keywords = ['input', 'output', 'wire', 'reg']\n for key in keywords:\n if key in line[:len(key) + 1]:\n parse_line(data, line, key)\n\n # Get connections\n if any(x in line[:5] for x in ['nand', 'nor', 'not', 'xor', 'and', 'or', 'xnor']):\n gate = re.search(r' (.*)\\(', line).group(1).strip()\n inputs = [s.strip() for s in re.search(r'\\((.*)\\)', line).group(1).split(',')]\n for i in inputs[1:]:\n data['connections'].append((i, gate))\n data['connections'].append((gate, inputs[0]))\n except:\n print(\"Not supported!\")\n exit(0)\n\n return data", "def build_model(\n data: Dict[str, np.array]\n) -> Tuple[popxl.Ir, Dict[str, DeviceToHostStream]]:\n ir = popxl.Ir()\n main = ir.main_graph\n\n with main:\n # Placeholder for tensor ids\n tensors = {}\n # Create variable tensors from the data\n for name in data.keys():\n tensors[name] = popxl.variable(data[name], name=name)\n\n # Placeholder for device to host streams\n d2h_streams = {}\n\n # Store and load the first tensor\n remote_buffer_1 = RemoteBuffer(\n tensor_shape=tensors[\"store_in_1\"]._pb_tensor.info.shape(),\n tensor_dtype=dtype.as_dtype(\n tensors[\"store_in_1\"]._pb_tensor.info.data_type_lcase()\n ),\n entries=1,\n )\n offset_tensor_1 = popxl.constant(0, name=\"offset_1\")\n # Ensure that the ops are in the order we define them in\n with popxl.in_sequence(True):\n ops.remote_store(\n remote_buffer=remote_buffer_1,\n offset=offset_tensor_1,\n t=tensors[\"store_in_1\"],\n )\n tensors[\"load_out_1\"] = ops.remote_load(\n remote_buffer=remote_buffer_1, offset=offset_tensor_1, name=\"load_out_1\"\n )\n tensors[\"load_out_1_inplace\"] = ops.remote_load_(\n remote_buffer=remote_buffer_1,\n offset=offset_tensor_1,\n t=tensors[\"load_in_1_inplace\"],\n )\n # Anchor the input tensors to the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_1\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_1_inplace\")\n # Anchor the output tensors of the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_1\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_1_inplace\")\n\n # Store and load the second and third tensor using a new buffer id\n remote_buffer_2 = RemoteBuffer(\n tensor_shape=tensors[\"store_in_2\"]._pb_tensor.info.shape(),\n tensor_dtype=dtype.as_dtype(\n tensors[\"store_in_2\"]._pb_tensor.info.data_type_lcase()\n ),\n 
entries=2,\n )\n # Index starts at 0\n offset_tensor_2 = popxl.constant(0, name=\"offset_2\")\n offset_tensor_3 = 1 # Test that the int version of offset works\n ops.remote_store(\n remote_buffer=remote_buffer_2,\n offset=offset_tensor_2,\n t=tensors[\"store_in_2\"],\n )\n ops.remote_store(\n remote_buffer=remote_buffer_2,\n offset=offset_tensor_3,\n t=tensors[\"store_in_3\"],\n )\n tensors[\"load_out_2\"] = ops.remote_load(\n remote_buffer=remote_buffer_2, offset=offset_tensor_2, name=\"load_out_2\"\n )\n tensors[\"load_out_3_inplace\"] = ops.remote_load_(\n remote_buffer=remote_buffer_2,\n offset=offset_tensor_3,\n t=tensors[\"load_in_3_inplace\"],\n )\n\n # Anchor the input tensors to the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_2\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_3_inplace\")\n # Anchor the output tensors of the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_2\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_3_inplace\")\n\n return ir, d2h_streams", "def resnet34(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n _load_pretrained(model, model_zoo.load_url(model_urls['resnet34']))\n return model", "def create_module(module_dict: Dict[str, Any], nets: List[Net]) -> Module:\n m_data = module_dict['module']\n footprint = m_data[0].replace('\"', \"\")\n layer = convert_to_layers(get_dict_by_key(m_data, 'layer')['layer'])[0]\n coords = get_dict_by_key(m_data, 'at')['at']\n if len(coords) == 3 and \"B.\" in layer.name:\n coords[2] = (float(coords[2]) + 180) % 360\n coords[1] = str(-1*float(coords[1]))\n attr = get_dict_by_key(m_data, 'attr')\n smd: bool = True if (attr and attr['attr'] == 'smd') else False\n module_texts: List[FpText] = get_texts(m_data, 'fp_text')\n figures: List[Union[FpPoly, FpCircle, FpArc, FpLine]] = get_lines(m_data, 'fp_line')\n figures.extend(get_circles(m_data, 'fp_circle'))\n pads = get_pads(m_data, nets)\n ref = [text.text for text in module_texts if text.text_type ==TextType.reference][0]\n update_nets_with_pads(pads, nets, ref)\n figures.extend(get_polys(m_data, 'fp_poly'))\n figures.extend(get_arcs(m_data, 'fp_arc'))\n return Module(footprint=footprint, layer=layer, coords=coords, smd=smd,\n texts=module_texts, pads=pads, figures=figures, extrapads=list())", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(FLAGS.model_dir, r'/home/lg/Desktop/finetune/frozen_inception_v3_299.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def,return_elements=['InceptionV3/Predictions/Reshape_1:0'], name='lg')", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def make_parser(data):\n # type: (str) -> RelayParser\n input_stream = InputStream(data)\n lexer = RelayLexer(input_stream)\n token_stream = CommonTokenStream(lexer)\n return RelayParser(token_stream)", "def 
create_graph():\n\t# Creates graph from saved graph_def.pb.\n\twith tf.gfile.FastGFile(modelFullPath, 'rb') as f:\n\t\tgraph_def = tf.GraphDef()\n\t\tgraph_def.ParseFromString(f.read())\n\t\t_ = tf.import_graph_def(graph_def, name='')", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def create_graph():\n with tf.gfile.FastGFile(os.path.join(\n config['inference']['model_dir'], 'output_graph.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def create_module(self, body: list, **kwargs):\n return ast.Module(body=body)", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)", "def file_based_input_fn_builder(input_file, seq_length, max_predictions_per_seq, is_training,\n drop_remainder, output_mode):\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"masked_lm_positions\": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\": tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\": tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n if output_mode == 'classification':\n name_to_features[\"label_ids\"] = tf.FixedLenFeature([], tf.int64)\n elif output_mode == 'regression':\n name_to_features[\"label_ids\"] = tf.FixedLenFeature([], tf.float32)\n else:\n raise KeyError(output_mode)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n print(name)\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n #batch_size = params[\"batch_size\"]\n batch_size = FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n if FLAGS.use_horovod:\n d = d.shard(hvd.size(), hvd.rank())\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=FLAGS.random_seed)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n return d\n\n return input_fn", "def load(call_dir: str) -> ModuleCall:\n with open(os.path.join(call_dir, \"metadata.pkl\"), \"rb\") as f:\n kwargs = pickle.load(f)\n\n for result_type in [\"input\", \"output\"]:\n key = 
f\"{result_type}s\" # inputs or outputs\n kwargs[key] = []\n\n files = glob.glob(os.path.join(call_dir, f\"{result_type}_*.pkl\"))\n for filename in sorted(files):\n with open(filename, \"rb\") as f:\n kwargs[key].append(pickle.load(f))\n\n # Convert to tuple to match python's return type for multiple results.\n kwargs[key] = tuple(kwargs[key])\n\n return ModuleCall(**kwargs)", "def loadLogicFromBinary(tapeString):\n\tpass", "def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):\n\n model = get_detector(cfg, checkpoint_path, device=\"cpu\")\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)\n\n return model, tensor_data", "def __init__(self, options_file: str,\n use_character_inputs=True,\n embedding_weight_file=None,\n max_batch_size=128, *args, **kwargs):\n super().__init__(*args, **kwargs)\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n\n if not use_character_inputs:\n if embedding_weight_file is None:\n raise ValueError(\n \"embedding_weight_file is required input with \"\n \"not use_character_inputs\"\n )\n self._options = options\n self._embedding_weight_file = embedding_weight_file\n self._use_character_inputs = use_character_inputs\n self._max_batch_size = max_batch_size\n\n self._ops = {}\n self.lm_graph = BidirectionalLanguageModelGraph(self._options, name='bilm',\n embedding_weight_file=self._embedding_weight_file,\n use_character_inputs=self._use_character_inputs,\n max_batch_size=self._max_batch_size)", "def build_input_fn(file_pattern, \n batch_size=1, \n shuffle=False, \n max_length=None,\n repeat_count=1):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n data_items_to_decoders = {\n field: tf.contrib.slim.tfexample_decoder.Tensor(field)\n for field in data_fields}\n\n def decode_record(record):\n \"\"\"Serialized Example to dict of <feature name, Tensor>.\"\"\"\n decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(\n data_fields, data_items_to_decoders)\n\n decode_items = list(data_items_to_decoders)\n decoded = decoder.decode(record, items=decode_items)\n return dict(zip(decode_items, decoded))\n\n data_files = tf.contrib.slim.parallel_reader.get_data_files(file_pattern)\n if shuffle:\n random.shuffle(data_files)\n dataset = tf.data.TFRecordDataset(data_files)\n dataset = dataset.map(decode_record)\n if max_length:\n dataset = dataset.filter(\n functools.partial(\n example_valid_size,\n max_length=max_length,\n ))\n dataset = dataset.repeat(repeat_count)\n if shuffle:\n dataset = dataset.shuffle(buffer_size=FLAGS.batch_size * 100)\n dataset = dataset.padded_batch(FLAGS.batch_size, padded_shapes={\n \"inputs\": [None],\n \"targets\": [None]})\n iterator = dataset.make_one_shot_iterator()\n batch_features = iterator.get_next()\n return batch_features", "def input_fn():\n files = tf.data.Dataset.list_files(os.path.join(\n tft_working_dir, filebase + '*'))\n dataset = files.interleave(\n tf.data.TFRecordDataset, cycle_length=4, block_length=16)\n dataset = dataset.map(parser)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.prefetch(prefetch_buffer_size)\n iterator = dataset.make_one_shot_iterator()\n transformed_features, transformed_labels = iterator.get_next()\n\n return transformed_features, transformed_labels", "def 
build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label", "def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction", "def input_fn(params=None):\n del params\n full_pattern = os.path.join(flags.data_dir, pattern)\n dataset = tf.data.Dataset.list_files(full_pattern)\n\n if flags.initial_shuffle_buffer_size > 0:\n dataset = dataset.shuffle(buffer_size=flags.initial_shuffle_buffer_size)\n dataset = dataset.repeat()\n\n # use interleave() and prefetch() to read many files concurrently\n def prefetch_map_fn(filename):\n return tf.data.TFRecordDataset(filename).prefetch(batch_size)\n\n if flags.prefetch_enabled:\n dataset = dataset.interleave(\n prefetch_map_fn,\n cycle_length=flags.cycle_length,\n block_length=batch_size)\n\n if flags.followup_shuffle_buffer_size > 0:\n dataset = dataset.shuffle(buffer_size=flags.followup_shuffle_buffer_size)\n\n frame_nums = range(0, flags.sequence_length, flags.skip_num)\n\n def parser(_, serialized_example):\n \"\"\"Parses a single example.\"\"\"\n features = {}\n\n for i in frame_nums:\n image_name = 'image_' + str(i)\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n features[pose_name] = tf.FixedLenFeature([flags.pose_dim], tf.float32)\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n features[action_name] = tf.FixedLenFeature([flags.pose_dim],\n tf.float32)\n features[joint_pos_name] = tf.FixedLenFeature([flags.joint_pos_dim],\n tf.float32)\n else:\n features[image_name] = tf.FixedLenFeature([1], tf.string)\n\n parsed_input = tf.parse_single_example(serialized_example, features)\n\n for i in frame_nums:\n 
image_name = 'image_' + str(i)\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH),\n method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n return image_seq, action_seq, action_seq, joint_pos_seq\n\n dataset = dataset.map(\n parser,\n num_parallel_calls=flags.num_parallel_calls).prefetch(batch_size)\n\n dataset = dataset.batch(batch_size)\n\n # use prefetch to overlap producer and consumer\n dataset = dataset.prefetch(1)\n\n images, actions, poses, joint_pos = dataset.make_one_shot_iterator(\n ).get_next()\n\n images.set_shape([batch_size, len(frame_nums), IMG_HEIGHT, IMG_WIDTH, 3])\n actions.set_shape([batch_size, len(frame_nums), flags.pose_dim])\n poses.set_shape([batch_size, len(frame_nums), flags.pose_dim])\n joint_pos.set_shape([batch_size, len(frame_nums), flags.joint_pos_dim])\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None", "def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod", "def build_engine(model_name, do_int8, dla_core, verbose=False):\n cfg_file_path = model_name + '.cfg'\n parser = DarkNetParser()\n layer_configs = parser.parse_cfg_file(cfg_file_path)\n net_c = get_c(layer_configs)\n net_h, net_w = get_h_and_w(layer_configs)\n\n print('Loading the ONNX file...')\n onnx_data = load_onnx(model_name)\n if onnx_data is None:\n return None\n\n TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger()\n EXPLICIT_BATCH = [] if trt.__version__[0] < '7' else \\\n [1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)]\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n if do_int8 and not builder.platform_has_fast_int8:\n raise RuntimeError('INT8 not supported on this platform')\n if not parser.parse(onnx_data):\n print('ERROR: Failed to parse the ONNX file.')\n for error in range(parser.num_errors):\n print(parser.get_error(error))\n return None\n network = 
set_net_batch(network, MAX_BATCH_SIZE)\n\n print('Adding yolo_layer plugins.')\n network = add_yolo_plugins(network, model_name, TRT_LOGGER)\n\n print('Adding a concatenated output as \"detections\".')\n network = add_concat(network, model_name, TRT_LOGGER)\n\n print('Naming the input tensort as \"input\".')\n network.get_input(0).name = 'input'\n\n print('Building the TensorRT engine. This would take a while...')\n print('(Use \"--verbose\" or \"-v\" to enable verbose logging.)')\n if trt.__version__[0] < '7': # older API: build_cuda_engine()\n if dla_core >= 0:\n raise RuntimeError('DLA core not supported by old API')\n builder.max_batch_size = MAX_BATCH_SIZE\n builder.max_workspace_size = 1 << 30\n builder.fp16_mode = True # alternative: builder.platform_has_fast_fp16\n if do_int8:\n from calibrator import YOLOEntropyCalibrator\n builder.int8_mode = True\n builder.int8_calibrator = YOLOEntropyCalibrator(\n 'calib_images', (net_h, net_w), 'calib_%s.bin' % model_name)\n engine = builder.build_cuda_engine(network)\n else: # new API: build_engine() with builder config\n builder.max_batch_size = MAX_BATCH_SIZE\n config = builder.create_builder_config()\n config.max_workspace_size = 1 << 30\n config.set_flag(trt.BuilderFlag.GPU_FALLBACK)\n config.set_flag(trt.BuilderFlag.FP16)\n profile = builder.create_optimization_profile()\n profile.set_shape(\n 'input', # input tensor name\n (MAX_BATCH_SIZE, net_c, net_h, net_w), # min shape\n (MAX_BATCH_SIZE, net_c, net_h, net_w), # opt shape\n (MAX_BATCH_SIZE, net_c, net_h, net_w)) # max shape\n config.add_optimization_profile(profile)\n if do_int8:\n from calibrator import YOLOEntropyCalibrator\n config.set_flag(trt.BuilderFlag.INT8)\n config.int8_calibrator = YOLOEntropyCalibrator(\n 'calib_images', (net_h, net_w),\n 'calib_%s.bin' % model_name)\n config.set_calibration_profile(profile)\n if dla_core >= 0:\n config.default_device_type = trt.DeviceType.DLA\n config.DLA_core = dla_core\n config.set_flag(trt.BuilderFlag.STRICT_TYPES)\n print('Using DLA core %d.' 
% dla_core)\n engine = builder.build_engine(network, config)\n\n if engine is not None:\n print('Completed creating engine.')\n return engine", "def _load_from_v1(definition: 'ModuleDefinitionV1',\n parent: Location,\n api_level: APIVersion) -> ModuleGeometry:\n mod_name = definition['loadName']\n model_lookup: Mapping[str, ModuleModel] = {\n 'thermocycler': ThermocyclerModuleModel.THERMOCYCLER_V1,\n 'magdeck': MagneticModuleModel.MAGNETIC_V1,\n 'tempdeck': TemperatureModuleModel.TEMPERATURE_V1}\n type_lookup = {\n 'thermocycler': ModuleType.THERMOCYCLER,\n 'tempdeck': ModuleType.TEMPERATURE,\n 'magdeck': ModuleType.MAGNETIC\n }\n model = model_lookup[mod_name]\n offset = Point(definition[\"labwareOffset\"][\"x\"],\n definition[\"labwareOffset\"][\"y\"],\n definition[\"labwareOffset\"][\"z\"])\n overall_height = definition[\"dimensions\"][\"bareOverallHeight\"]\\\n\n height_over_labware = definition[\"dimensions\"][\"overLabwareHeight\"]\n\n if model in ThermocyclerModuleModel:\n lid_height = definition['dimensions']['lidHeight']\n mod: ModuleGeometry = \\\n ThermocyclerGeometry(definition[\"displayName\"],\n model,\n type_lookup[mod_name],\n offset,\n overall_height,\n height_over_labware,\n lid_height,\n parent,\n api_level)\n else:\n mod = ModuleGeometry(definition['displayName'],\n model,\n type_lookup[mod_name],\n offset,\n overall_height,\n height_over_labware,\n parent, api_level)\n return mod", "def res_inabp_34(imagenet_name=False, **kwargs):\n if imagenet_name:\n imagenet_name = 'resnet34'\n else:\n imagenet_name = None\n model = res_INABP(BasicBlock, [3, 4, 6, 3], **kwargs)\n model.load_pretrained_weights(imagenet_name)\n return model", "def _build_rnn(self, input_tensor):\n\n w_trainable = False\n x_shift_trainable = False\n eta_trainable = True\n\n input_shape = input_tensor.get_shape().as_list()\n input_area = np.prod(input_shape[1:])\n batch_input_shape = (-1, input_area)\n\n filters = self._hparams.filters + self._hparams.bias_neurons\n hidden_size = [filters]\n weights_shape = [filters, filters]\n\n with tf.variable_scope(\"rnn\"):\n init_state_pl = self._dual.add('init_pl', shape=hidden_size, default_value=0).add_pl()\n init_hebb_pl = self._dual.add('hebb_init_pl', shape=weights_shape, default_value=0).add_pl()\n\n # ensure init placeholders are being reset every iteration\n init_hebb_pl = tf_print(init_hebb_pl, \"Init Hebb:\", summarize=100, mute=True)\n\n # Input reshape: Ensure flat (vector) x batch size input (batches, inputs)\n # -----------------------------------------------------------------\n input_vector = tf.reshape(input_tensor, batch_input_shape, name='input_vector')\n\n # unroll input into a series so that we can iterate over it easily\n x_series = tf.unstack(input_vector, axis=0, name=\"ep-series\") # batch_size of hidden_size\n\n # get the target and degraded samples\n target = input_vector[-1]\n target = tf_print(target, \"TARGET\\n\", mute=True)\n degraded_extracted = input_vector[-2]\n degraded_extracted = tf_print(degraded_extracted, \"DEGRADED-extracted\\n\", mute=True)\n self._dual.set_op('target', target)\n self._dual.set_op('degraded_raw', degraded_extracted)\n\n y_current = tf.reshape(init_state_pl, [1, filters], name=\"init-curr-state\")\n hebb = init_hebb_pl\n\n with tf.variable_scope(\"slow-weights\"):\n w_default = 0.01\n alpha_default = 0.1\n eta_default = 0.1\n x_shift_default = 0.01\n bias_default = 1.0 * w_default # To emulate the Miconi method of having an additional input at 20 i.e.\n # it creates an output of 1.0, and this is multiplied 
by the weight (here we have straight bias, no weight)\n\n if w_trainable:\n w = tf.get_variable(name=\"w\", initializer=(w_default * tf.random_uniform(weights_shape)))\n else:\n w = tf.zeros(weights_shape)\n\n alpha = tf.get_variable(name=\"alpha\", initializer=(alpha_default * tf.random_uniform(weights_shape)))\n\n if eta_trainable:\n eta = tf.get_variable(name=\"eta\", initializer=(eta_default * tf.ones(shape=[1])))\n else:\n eta = eta_default * tf.ones([1])\n\n if x_shift_trainable:\n x_shift = tf.get_variable(name=\"x_shift\", initializer=(x_shift_default * tf.ones(shape=[1])))\n else:\n x_shift = 0\n\n self._dual.set_op('w', w)\n self._dual.set_op('alpha', alpha)\n self._dual.set_op('eta', eta)\n self._dual.set_op('x_shift', x_shift)\n\n if self._hparams.bias:\n bias = tf.get_variable(name=\"bias\", initializer=(bias_default * tf.ones(filters)))\n self._dual.set_op('bias', bias)\n bias = tf_print(bias, \"*** bias ***\", mute=MUTE_DEBUG_GRAPH)\n\n with tf.variable_scope(\"layers\"):\n hebb = tf_print(hebb, \"*** initial hebb ***\", mute=MUTE_DEBUG_GRAPH)\n y_current = tf_print(y_current, \"*** initial state ***\")\n w = tf_print(w, \"*** w ***\", mute=MUTE_DEBUG_GRAPH)\n alpha = tf_print(alpha, \"*** alpha ***\", mute=MUTE_DEBUG_GRAPH)\n\n i = 0\n last_x = None\n outer_first = None\n outer_last = None\n for x in x_series:\n # last sample is target, so don't process it again\n if i == len(x_series) - 1: # [0:x, 1:d, 2:t], l=3\n break\n layer_name = \"layer-\" + str(i)\n with tf.variable_scope(layer_name):\n x = self._hparams.bt_amplify_factor * x\n x = tf_print(x, str(i) + \": x_input\", mute=MUTE_DEBUG_GRAPH)\n y_current = tf_print(y_current, str(i) + \": y(t-1)\", mute=MUTE_DEBUG_GRAPH)\n\n # neurons latch on as they have bidirectional connections\n # attempt to remove this issue by knocking out lateral connections\n remove = 'random'\n if remove == 'circular':\n diagonal_mask = tf.convert_to_tensor(np.tril(np.ones(weights_shape, dtype=np.float32), 0))\n alpha = tf.multiply(alpha, diagonal_mask)\n elif remove == 'random':\n size = np.prod(weights_shape[:])\n knockout_mask = np.ones(size)\n knockout_mask[:int(size / 2)] = 0\n np.random.shuffle(knockout_mask)\n knockout_mask = np.reshape(knockout_mask, weights_shape)\n alpha = tf.multiply(alpha, knockout_mask)\n\n # ---------- Calculate next output of the RNN\n weighted_sum = tf.add(tf.matmul(y_current - x_shift,\n tf.add(w, tf.multiply(alpha, hebb, name='lyr-mul'), name=\"lyr-add_w_ah\"),\n name='lyr-mul-add-matmul'),\n x, \"weighted_sum\")\n\n if self._hparams.bias:\n weighted_sum = tf.add(weighted_sum, bias) # weighted sum with bias\n\n y_next, _ = activation_fn(weighted_sum, self._hparams.nonlinearity)\n\n with tf.variable_scope(\"fast_weights\"):\n # ---------- Update Hebbian fast weights\n # outer product of (yin * yout) = (current_state * next_state)\n outer = tf.matmul(tf.reshape(y_current, shape=[filters, 1]),\n tf.reshape(y_next, shape=[1, filters]),\n name=\"outer-product\")\n outer = tf_print(outer, str(i) + \": *** outer = y(t-1) * y(t) ***\", mute=MUTE_DEBUG_GRAPH)\n\n if i == 1: # first outer is zero\n outer_first = outer\n outer_last = outer\n\n hebb = (1.0 - eta) * hebb + eta * outer\n hebb = tf_print(hebb, str(i) + \": *** hebb ***\", mute=MUTE_DEBUG_GRAPH)\n\n # record for visualisation the output when presented with the last blank\n idx_blank_first = self._blank_indices[-1][0]\n idx_blank_last = self._blank_indices[-1][1]\n\n if i == idx_blank_first:\n blank_output_first = y_next\n 
self._dual.set_op('blank_output_first', blank_output_first)\n\n if i == idx_blank_last:\n blank_output_last = y_next\n self._dual.set_op('blank_output_last', blank_output_last)\n\n y_current = y_next\n last_x = x\n i = i + 1\n\n self._dual.set_op('hebb', hebb)\n self._dual.set_op('outer_first', outer_first)\n self._dual.set_op('outer_last', outer_last)\n\n last_x = tf_print(last_x, str(i) + \": LAST-X\", mute=True)\n self._dual.set_op('degraded', last_x)\n\n output_pre_masked = tf.squeeze(y_current)\n self._dual.set_op('output_pre_masked', output_pre_masked) # pre-masked output\n\n # External masking\n # -----------------------------------------------------------------\n with tf.variable_scope(\"masking\"):\n mask_pl = self._dual.add('mask', shape=hidden_size, default_value=1.0).add_pl()\n y_masked = tf.multiply(y_current, mask_pl, name='y_masked')\n\n # Setup the training operations\n # -----------------------------------------------------------------\n with tf.variable_scope(\"optimizer\"):\n loss_op = self._build_loss_op(y_masked, target)\n self._dual.set_op('loss', loss_op)\n\n self._optimizer = tf.train.AdamOptimizer(self._hparams.learning_rate)\n training_op = self._optimizer.minimize(loss_op,\n global_step=tf.train.get_or_create_global_step(), name='training_op')\n self._dual.set_op('training', training_op)\n\n return y_masked, y_masked", "def create_example(filename, hparams):\n wav_data = audio_io.samples_to_wav_data( # class bytes\n librosa.util.normalize(librosa.core.load(\n filename, sr=hparams.sample_rate)[0]), hparams.sample_rate)\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'id':\n tf.train.Feature(bytes_list=tf.train.BytesList(\n value=[filename.encode('utf-8')]\n )),\n 'sequence':\n tf.train.Feature(bytes_list=tf.train.BytesList(\n value=[music_pb2.NoteSequence().SerializeToString()]\n )),\n 'audio':\n tf.train.Feature(bytes_list=tf.train.BytesList(\n value=[wav_data]\n )),\n 'velocity_range':\n tf.train.Feature(bytes_list=tf.train.BytesList(\n value=[music_pb2.VelocityRange().SerializeToString()]\n )),\n }))\n return example.SerializeToString()", "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def __init__(self, row_size):\r\n self.tn = TraderNetwork(row_size).double() \r\n self.tn.load_state_dict(torch.load(\"./submission/example_model.pt\")) \r\n self.tn.eval()", "def __init__(self):\r\n ScriptedLoadableModuleLogic.__init__(self)\r\n self.rgbport = 18944\r\n self.depthPort = 18945", "def build_engine():\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n builder.max_workspace_size = 1 << 30 # 1GB\n builder.max_batch_size = 1\n builder.fp16_mode = mode_fp16\n # builder.int8_mode = mode_int8\n # Parse model file\n if not os.path.exists(onnx_file_path):\n print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))\n exit(0)\n print('Loading ONNX file from path {}...'.format(onnx_file_path))\n with open(onnx_file_path, 'rb') as model:\n print('Beginning ONNX file parsing')\n parser.parse(model.read())\n print('Completed parsing of ONNX file')\n print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))\n engine = builder.build_cuda_engine(network)\n print(\"Completed creating Engine\")\n with open(engine_file_path, \"wb\") as f:\n f.write(engine.serialize())\n 
return engine", "def resnet18(bitW, bitA, pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], bitW, bitA, **kwargs)\n if pretrained == True:\n load_dict = torch.load('./full_precision_records/weights/model_best.pth.tar')['state_dict']\n model_dict = model.state_dict()\n model_keys = model_dict.keys()\n for name, param in load_dict.items():\n if name.replace('module.', '') in model_keys:\n model_dict[name.replace('module.', '')] = param \n model.load_state_dict(model_dict) \n return model", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def FromFile(cls, path: pathlib.Path, ir_id: int):\n with open(path, \"rb\") as f:\n graph_tuple = pickle.load(f)\n\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)", "def build_model(self) -> nn.Module:\n pass", "def __init__(self, weights_path):\n self.model = mobilenet(input_shape=(224, 224, 3), output_shape=(2, ))\n self.model.load_weights(weights_path)", "def get_engine(onnx_file_path, engine_file_path=\"\"):\n def build_engine():\n \"\"\"Takes an ONNX file and creates a TensorRT engine to run inference with\"\"\"\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n builder.max_workspace_size = 1 << 30 # 1GB\n builder.max_batch_size = 1\n builder.fp16_mode = mode_fp16\n # builder.int8_mode = mode_int8\n # Parse model file\n if not os.path.exists(onnx_file_path):\n print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))\n exit(0)\n print('Loading ONNX file from path {}...'.format(onnx_file_path))\n with open(onnx_file_path, 'rb') as model:\n print('Beginning ONNX file parsing')\n parser.parse(model.read())\n print('Completed parsing of ONNX file')\n print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))\n engine = builder.build_cuda_engine(network)\n print(\"Completed creating Engine\")\n with open(engine_file_path, \"wb\") as f:\n f.write(engine.serialize())\n return engine\n\n if os.path.exists(engine_file_path):\n # If a serialized engine exists, use it instead of building an engine.\n print(\"engine file already exits at {}\".format(engine_file_path))\n \n else:\n return build_engine()", "def __init__(self, layer_list_info):\n super(DynaNet, self).__init__()\n self.layer_list_info = layer_list_info\n self.task_modules = nn.ModuleDict()\n self.classification_layers = nn.ModuleDict()\n self.module_generator = ModuleFactory(layer_list_info)\n self.task_module_name_path = {}\n self.nr_levels = len(layer_list_info)\n self.task_idx = None", "def parse_module(filename: str) -> Module:\n state = get_state()\n contents = \"\"\n if str(filename)[-2:] == \"/-\":\n contents, _, __ = decode_bytes(sys.stdin.buffer.read())\n elif not os.path.isfile(filename):\n raise Exception(\"file does not exist\")\n else:\n with open(filename, encoding=\"utf-8\", errors=\"replace\") as file_to_read:\n contents += file_to_read.read()\n try:\n ast = ast3.parse(contents)\n except: # noqa\n out(\"error in parsing\", color=\"red\")\n if state.ignore_exceptions:\n sys.exit(0)\n module = Module(ast, filename) # type: ignore\n return module", "def create_graph():\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, pbfilename), 'rb') as f:\n 
graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def generate_ir_for_external_function(code, func_t, context):\n # TODO type hints:\n # def generate_ir_for_external_function(\n # code: vy_ast.FunctionDef,\n # func_t: ContractFunctionT,\n # context: Context,\n # check_nonpayable: bool,\n # ) -> IRnode:\n nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(func_t)\n\n # generate handlers for base args and register the variable records\n handle_base_args = _register_function_args(func_t, context)\n\n # generate handlers for kwargs and register the variable records\n kwarg_handlers = _generate_kwarg_handlers(func_t, context)\n\n body = [\"seq\"]\n # once optional args have been handled,\n # generate the main body of the function\n body += handle_base_args\n\n body += nonreentrant_pre\n\n body += [parse_body(code.body, context, ensure_terminated=True)]\n\n # wrap the body in labeled block\n body = [\"label\", func_t._ir_info.external_function_base_entry_label, [\"var_list\"], body]\n\n exit_sequence = [\"seq\"] + nonreentrant_post\n if func_t.is_constructor:\n pass # init func has special exit sequence generated by module.py\n elif context.return_type is None:\n exit_sequence += [[\"stop\"]]\n else:\n exit_sequence += [[\"return\", \"ret_ofst\", \"ret_len\"]]\n\n exit_sequence_args = [\"var_list\"]\n if context.return_type is not None:\n exit_sequence_args += [\"ret_ofst\", \"ret_len\"]\n # wrap the exit in a labeled block\n exit_ = [\"label\", func_t._ir_info.exit_sequence_label, exit_sequence_args, exit_sequence]\n\n # the ir which comprises the main body of the function,\n # besides any kwarg handling\n func_common_ir = IRnode.from_list([\"seq\", body, exit_], source_pos=getpos(code))\n\n return kwarg_handlers, func_common_ir", "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def birealnet34(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)\n return model", "def get_network(name, batch_size, dtype='float32', ir='relay'):\n if ir == 'relay':\n from tvm.relay import testing\n else:\n raise Exception(\"ir must be `relay`, but you used `{}`\".format(ir))\n\n input_shape = (batch_size, 3, 224, 224)\n if name == 'mobilenet':\n net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)\n elif name == 'mobilenet_v2':\n net, params = testing.mobilenet_v2.get_workload(batch_size=batch_size, dtype=dtype)\n elif name == 'inception_v3':\n input_shape = (batch_size, 3, 299, 299)\n net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)\n elif \"resnet\" in name:\n n_layer = int(name.split('-')[1])\n net, params = testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)\n elif \"vgg\" in name:\n n_layer = int(name.split('-')[1])\n net, params = testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)\n elif \"densenet\" in name:\n n_layer = int(name.split('-')[1])\n net, params = testing.densenet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)\n elif \"squeezenet\" in name:\n version = name.split(\"_v\")[1]\n net, params = testing.squeezenet.get_workload(batch_size=batch_size, version=version, dtype=dtype)\n elif name == 'custom':\n # an example for custom network\n # from 
tvm.relay.testing import init\n # net = relay.var('data')\n # net = relay.testing.layers.conv2d(net, channels=4, kernel_size=(3,3), padding=(1,1))\n # net = relay.nn.batch_flatten(net)\n # net = relay.testing.layers.dense_add_bias(net, units=1000)\n # net, params = init.create_workload(net, batch_size, (3, 224, 224))\n from tvm.relay.testing import init\n input_shape = (3, 224)\n net = relay.var('data', shape=input_shape)\n weight = relay.var('dense_weight', shape=(224, 224))\n net = relay.nn.dense(net, weight)\n net = relay.Function(relay.analysis.free_vars(net), net)\n # net = relay.testing.layers.dense_add_bias(net, name=\"dense\")\n net, params = init.create_workload(net)\n # simple networks for experimenting\n elif name == 'mlp':\n image_shape = (1, 28, 28)\n input_shape = (batch_size,) + image_shape\n net, params = testing.mlp.get_workload(batch_size=batch_size, image_shape=image_shape)\n elif name == 'nature-dqn':\n image_shape = (4, 84, 84)\n input_shape = (batch_size,) + image_shape\n net, params = testing.dqn.get_workload(batch_size=batch_size, image_shape=image_shape)\n elif name == 'dcgan':\n random_len = 100\n input_shape = (batch_size, random_len)\n net, params = testing.dcgan.get_workload(batch_size, random_len=random_len)\n elif name == 'densenet':\n input_shape = (3, 64, 64)\n net, params = testing.densenet.get_workload(batch_size=batch_size)\n # elif name == 'mxnet':\n # # an example for mxnet model\n # from mxnet.gluon.model_zoo.vision import get_model\n # block = get_model('resnet18_v1', pretrained=True)\n # net, params = nnvm.frontend.from_mxnet(block)\n # net = nnvm.sym.softmax(net)\n else:\n raise ValueError(\"Unsupported network: \" + name)\n\n return net, params, input_shape", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])" ]
[ "0.63068986", "0.6023051", "0.5813477", "0.5714911", "0.5690382", "0.5531973", "0.55067134", "0.5490141", "0.5472677", "0.54595083", "0.5441401", "0.5435935", "0.5368517", "0.5319013", "0.5307069", "0.5298142", "0.5294426", "0.5289352", "0.52519286", "0.5250053", "0.5224872", "0.5201036", "0.5175264", "0.51663953", "0.51645976", "0.5154294", "0.5152527", "0.5149556", "0.51124364", "0.5108633", "0.5103262", "0.5101094", "0.509319", "0.5090641", "0.5090455", "0.50864786", "0.50835323", "0.5068662", "0.5058898", "0.50431496", "0.5042543", "0.5040696", "0.50318444", "0.50287986", "0.50284237", "0.5015878", "0.5014182", "0.5009355", "0.5008876", "0.49943992", "0.49927884", "0.49839246", "0.49676183", "0.49596912", "0.49596912", "0.49596912", "0.49591678", "0.4958907", "0.49580374", "0.4956787", "0.49536586", "0.49471688", "0.4942412", "0.49404335", "0.49399316", "0.49316794", "0.49266374", "0.4926004", "0.49236387", "0.4919707", "0.49152464", "0.49147815", "0.4914323", "0.49142933", "0.49115646", "0.4879402", "0.48792747", "0.4875938", "0.48746315", "0.4869773", "0.48648056", "0.48646915", "0.48599422", "0.4858222", "0.4855161", "0.4850987", "0.48456445", "0.484497", "0.48413113", "0.48292905", "0.48291627", "0.48288205", "0.48203534", "0.48115218", "0.48065737", "0.48063406", "0.48059112", "0.48059112", "0.48053306", "0.48027605" ]
0.8060826
0
Initialises a tree object.
def __init__(self, lower, upper):
    self.vector = self._initialise(lower, upper)
    self.seeds = 0
    self.year = 0
    self.valid = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, tree):\n self._tree = tree", "def __init__(self, tree):\n self._tree = tree", "def __init__(self,tree):\n self._tree = tree", "def __init__(self, tree_node=None):\n self.root = tree_node", "def __init__(self):\n self.tree = {}", "def __init__(self):\n Tree.__init__(self, \"\")", "def __init__(self):\n self.root = TreeNode(None)", "def _initialize_trees(self):", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = self.Node(None)", "def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def __init__(self, *args):\n _snap.TFltTree_swiginit(self, _snap.new_TFltTree(*args))", "def __init__(self, path: str) -> None:\n self.root = Node(\n None, path\n )\n self.depth = 0\n self.add_node(self.root)", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def __init__(self): # 用dict模拟字典树即可\n self.root = {}", "def __init__(self):\n self.root = Node(None)", "def __init__(self, root):\n self._root = root\n self._leaves = [root]", "def __init__(self):\n self.root = self.Node()", "def __init__(self):\n self.root = Node('')", "def __init__(self, node):\n self.node = node\n self.parent = None\n self.depth = None", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self, start_tree=None) -> None:\n self.root = None\n\n # populate BST with initial values (if provided)\n # before using this feature, implement add() method\n if start_tree is not None:\n for value in start_tree:\n self.add(value)", "def __init__(self, start_tree=None) -> None:\n self.root = None\n\n # populate BST with initial values (if provided)\n # before using this feature, implement add() method\n if start_tree is not None:\n for value in start_tree:\n self.add(value)", "def __init__(self, root, branches=None):\n self.tree_dict = {}\n self.directory = Path(root)\n self.start = str(self.directory).rfind(os.sep) + 1\n self.branches = branches\n self.get()", "def __init__(self):\n\n\t\tself.root = None\n\t\tself.numNodes = 0", "def __init__(self):\n self.__root = Node()", "def __init__(self):\n self.root = TreeNode('#')", "def __init__(self, size):\n\n self._root = Node()\n size_left = int(size/2)\n # Initialization of the tree\n self._root.left = self._createSubtree(self._root, 0, size_left) # [a,b[\n self._root.right = self._createSubtree(self._root, size_left, size)\n self._max_priority = 1", "def __init__(self):\n\n self.root = Node(name='root',children=set())\n\n self.map = {}\n self.map['root'] = self.root\n\n self.jsonStr = \"\"", "def __init__(self, directory):\n self.nodes = {}\n self.leaves = {}\n self.root = None\n self.directory = pathlib.Path(directory).absolute()\n self.directory.mkdir(exist_ok=True)", "def setUp(self):\n\n self.root = TreeNode(1)\n\n inorderL= [3,2,5,4]\n preorderL = [2,3,4,5]\n left_subtree = Tree.from_inorder_preorder(inorderL, preorderL)\n\n inorderR = [4,5,2,3]\n preorderR = [2,4,5,3]\n right_subtree = Tree.from_inorder_preorder(inorderR, preorderR)\n\n self.root.left = left_subtree.root\n self.root.right = right_subtree.root\n\n tree = Tree(self.root)", "def __init__(self):\n self.root = TridNode()", "def __init__(self, 
container=[]):\n # Initialize empty tree.\n self.root = None\n # Insert every item from container.\n for item in container:\n self.insert(item)", "def __init__(self, tree):\n self._tree = tree\n self._size = len(tree)\n\n self._tree.reindex()\n self._preprocess()", "def __init__(self, root=None):\n self.set_root(root)", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def __init__(self, root_node):\n\n\t\tself.root = root_node\n\t\tself.left_child = None\n\t\tself.right_child = None", "def __init__(self):\n # use a Trie as a data structure\n self.root = Node()", "def __init__(self):\n self.root = SimpleNode()", "def __init__(self, val=None):\n self.val = val\n self.parent = None\n if val is not None:\n self.left = BSTree()\n self.right = BSTree()\n else:\n self.left = None\n self.right = None", "def __init__(self, tree, filename):\n self.filename = filename\n self.tree = tree", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def __init__(self, parent):\n if isinstance(parent, Node):\n self.parent = parent\n self.tree = parent.tree\n self.depth = parent.depth + 1\n else:\n self.parent = None\n self.tree = parent\n self.depth = 0\n self.children = []\n self.childrencreated = 0\n self.expanded = 0\n self.selected = 0\n \n self.text_id = 0\n self.image_id = 0\n self.minusplus_id = 0\n \n self.oldy = -1000\n self.changed = 0\n \n if not self.parent: self.tree._setnode(self) # Root Node must be passed to the tree.", "def __init__(self, name):\n debug.printMsg(\"We Initiated a BST with no root node\")\n self.name = name\n self.root = None\n self.size = 0", "def __init__(self) -> None:\n\t\t# Call super\n\t\tsuper(RootNode, self).__init__()\n\t\tself.nodes: List[Node] = []\n\t\tself.subfiles: Set[str] = set()", "def __init__(self, root):\n self.root = root", "def __init__(self, root):\n self.root = root", "def __init__(self):\n self.root = self.TrieNode(None)", "def __init__(self,treeString):\n\t\tself.treeString=treeString\n\t\tif self.checkString(self.treeString) == True:\n\t\t\tself.root=node.node(None)\n\t\t\tself.currNode=self.root\n\t\t\tself.buildTree(self.treeString)\n\t\t\t\"\"\"once information from buildTree is assigned to individual nodes, \n\t\t\tthe nodes then parse that information into names, branch lengths, etc\n\t\t\trecursive function allows the call to be made only to the root\"\"\"\n\t\t\t#use this step to do likelihood calculation\n\t\t\tself.root.processInfo()\n\t\telse:\n\t\t\t#change to an error in long run\n\t\t\tprint \"improperly formatted newick string\"", "def __init__(self):\n self.root = self.get_new_node();", "def __init__(self, rootData=None, treetype=None):\n\t\tself._root = BTNode(rootData) if rootData else None\n\t\tself._size = 1 if rootData else 0\n\t\tself._treetype = treetype\n\n\t\t#Only for printing functionality\n\t\tself._depth = 1 if rootData else 0 #Root is at depth 1\n\t\tself._max_chars = len(str(rootData)) if rootData else 1 #max string length of data elements", "def __init__(self, *args):\n _snap.TStrTree_swiginit(self, _snap.new_TStrTree(*args))", "def _init_tree(self, tree):\n tree_path = self._get_tree_file(self.h5_file)\n if not isinstance(tree, (cKDTree, str, type(None))):\n tree = None\n logger.warning('Precomputed tree must be supplied as a pickle '\n 'file or a cKDTree, not a {}'\n 
.format(type(tree)))\n\n if tree is None:\n if tree_path in os.listdir(TREE_DIR.name):\n tree = os.path.join(TREE_DIR.name, tree_path)\n\n if isinstance(tree, str):\n tree = self._load_tree(tree)\n\n if tree is None:\n lat_lon = self.lat_lon\n tree = cKDTree(lat_lon) # pylint: disable=not-callable\n self._save_tree(tree, os.path.join(TREE_DIR.name, tree_path))\n\n return tree", "def __init__(self):\n self.root = self.TrieNode(0)", "def __init__(self, value, tree):\n self.value = value\n # Make sure that the decision tree is in the form of a dictionary\n if(isinstance(tree,dict)):\n self.children = tree.keys()", "def __init__(self, state, parent=None, action=None):\n\t\tself.state = state\n\t\tself.parent = parent\n\t\tself.action = action\n\t\tself.depth = 0\n\t\t# If depth is specified then depth of node will be 1 more than the depth of parent\n\t\tif parent:\n\t\t\tself.depth = parent.depth + 1", "def __init__(self, *args):\n _snap.TIntTree_swiginit(self, _snap.new_TIntTree(*args))", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self):\n self.root = TrieNode()\n self.first_row = self.root.children", "def __init__(self, root, left, right):\n # attributs prives\n self.__root = root # racine de l'arbre\n self.__left = left\n self.__right = right", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self):\n self.root = {}", "def __init__(self, node_text=\"\", node_type=0, node_parent=None):\n self.node_text = node_text\n self.node_type = node_type\n self.node_parent = node_parent\n self.node_left = None\n self.node_right = None", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def TreeInit(tree):\n \"\"\" Settings/NI_6133 \"\"\"\n tree.addNode('.SETTINGS')\n tree.addNode('.SETTINGS.EXPERIMENT')\n tree.addNode('.SETTINGS.NI')\n tree.addNode('.SETTINGS.NI.NI_6602_TIME')\n tree.addNode('.SETTINGS.NI.NI_6133')\n tree.addNode('.NI_6133')\n tree.addNode('.NI_FPGA')\n tree.addNode('.SETTINGS.NI.NI_6133_DIO')\n tree.addNode('.TEK_2024B')\n tree.addNode('.TEK_2024B.TEK')\n tree.addNode('.TEK_2024B.TEK1')\n tree.addNode('.PIMAX3')\n tree.addNode('.PIMAX3.RAW')\n tree.addNode('.PIMAX3.CAM_SETTING')\n \"\"\" Single-valued member nodes \"\"\"\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_DATE','TEXT',\n 'SHOTDATEANDTIME')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NOTES','TEXT','SHOTNOTES')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SYS_MESSAGE','TEXT','SYSMESSAGE')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_QUALITY','TEXT',\n 'SHOTQUALITY')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:SHOT_NUMBER','TEXT',\n 'SHOTNUMBER')\n AddNodeWithTag(tree,'.SETTINGS.EXPERIMENT:PROG_VERSION','TEXT',\n 'PROGRAM_VERSION')\n AddNodeWithTag(tree, '.TEK_2024B.TEK:RAW', 'TEXT', 'RAWTEKSCOPE')\n AddNodeWithTag(tree, '.TEK_2024B.TEK1:RAW', 'TEXT', 'RAWTEKSCOPE1')", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self):\n self._root = None\n self._size = 0\n self._curr_idx = 0\n self._depths, self._heights = 
None, None", "def __init__(self, *args):\n if self.__class__ == ctree_parentee_t:\n _self = None\n else:\n _self = self\n this = _ida_hexrays.new_ctree_parentee_t(_self, *args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.root = None", "def __init__(self):\n self.root = None", "def __init__(self, value: T):\n self.value = value\n self.children: List[Tree] = []", "def __init__(self, data, parent):\n self.left = None\n self.right = None\n self.data = data\n self.parent = parent", "def __init__(self):\n self.root = TrieNode(None)", "def __init__(self):\n self.root = TrieNode(None)", "def __init__(self, nodes):\n\t\t\n\t\tself.variables = dict([(n.name, n) for n in nodes])\n\t\tself.roots = [n for n in nodes if not n.parents]\n\t\tself.nodes = nodes", "def __init__(self, depth_limit=float(20)):\n\n self.root = None\n self.depth_limit = depth_limit", "def __init__(self, support_label='bootstrap', remove_name_quotes=True):\n self.name = None\n self.node_names = {} # Maps a name to its TreeNode object\n # Variables involving TreeNode objects:\n self.root = None\n self.leaves = set()\n self.internal = set()\n self.nodes = set()\n self.paths = {}\n self.path_dists = {}\n # # # Private attributes\n self._is_cladogram = None # None means it hasn't been set; will be True or False.\n self._cladogram_branch = 1.0 # length of each branch in a cladogram\n self._remove_name_quotes = remove_name_quotes\n self._support_label = support_label\n self._node_ids = set()\n self._node_id_template = '_node_{}' # Must not invalidate any restricted character set\n self._node_id_index = 0\n self._max_branch_precision = 10", "def __init__(self, klass = BSTNode):\n self.root = None\n self.klass = klass", "def __init__(self, klass = BSTNode):\n self.root = None\n self.klass = klass", "def initialize(self):\n self.tree = ROOT.TTree('tree', 'tree')\n self.simhitcount = []\n self.simhitarrays = np.array(self.simhitcount, dtype=np.int32)\n self.digitcount = []", "def __init__(self, label=\"\"):\n super(Tree, self).__init__(Tree) \n self.mother = None\n self.name = label", "def __init__(self, c):\n TreeNode.__init__(self)\n self.c = c", "def __init__(self, attribute_names):\r\n self.attribute_names = attribute_names\r\n self.tree = None", "def __init__(self, path2tree, treeformat='newick'):\r\n self.path2tree = path2tree\r\n self.treeformat = treeformat\r\n self.tree = Phylo.read(self.path2tree, self.treeformat)", "def __init__(self, name: str) -> None:\n\t\t# Set variables to blank values\n\t\tself.name = name\n\t\tself.has_subfile = False\n\t\tself.root_nodes: List[RootNode] = []\n\t\tself.root_names: Set[str] = set()\n\t\tself.node_names: Set[str] = set()\n\t\tself.subnode_names: Dict[str, Set[str]] = {}", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()" ]
[ "0.8144038", "0.8144038", "0.8024923", "0.79929876", "0.77982944", "0.7713998", "0.7585025", "0.75807476", "0.75445074", "0.75445074", "0.73131335", "0.73130155", "0.7305011", "0.72859436", "0.72735286", "0.7250354", "0.7244573", "0.7243289", "0.72401434", "0.7235641", "0.72117805", "0.71938336", "0.71938336", "0.71262693", "0.71262693", "0.71262693", "0.7107835", "0.7107835", "0.7105834", "0.7100733", "0.7098033", "0.70819473", "0.7076504", "0.7070736", "0.70682883", "0.70596117", "0.69957095", "0.6964174", "0.6962085", "0.69567543", "0.6949535", "0.69480044", "0.69449663", "0.69398916", "0.6933968", "0.6901457", "0.68876916", "0.68643534", "0.6836908", "0.683068", "0.68241215", "0.68241215", "0.68078494", "0.67958426", "0.67521304", "0.67448765", "0.67406267", "0.6738103", "0.6733807", "0.67229855", "0.6722237", "0.6720974", "0.672019", "0.6713412", "0.67120004", "0.66935134", "0.66935134", "0.66935134", "0.66935134", "0.66935134", "0.66919184", "0.66850454", "0.66850454", "0.668414", "0.6667253", "0.6667253", "0.66617537", "0.6644727", "0.6597869", "0.6597869", "0.6594789", "0.6594562", "0.6592571", "0.6592571", "0.6569859", "0.65647024", "0.6554596", "0.6548168", "0.6548168", "0.65389806", "0.65266585", "0.6510838", "0.6498119", "0.64979804", "0.64957863", "0.64934975", "0.64934975", "0.64934975", "0.64934975", "0.64934975", "0.64934975" ]
0.0
-1
Tree object comparison method.
def __cmp__(self, other):
    if hasattr(other, 'year'):
        return self.year.__cmp__(other.year)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compareTree(t1, t2):\n \n reorderTree(t1)\n reorderTree(t2)\n\n return compareTreeHelper(t1, t2)", "def __eq__(self, other: 'Tree') ->bool:\n return (type(self) is type(other) and\n self.value == other.value and\n self.children == other.children)", "def __eq__(self, other):\n if not isinstance(other, Tree):\n raise ValueError('Cannot compare objects.')\n\n if self.is_leaf():\n if other.is_leaf():\n return True\n else:\n return False\n\n if other.is_leaf():\n return False\n\n if (self.left_subtree == other.left_subtree and self.right_subtree == other.right_subtree) \\\n or (self.left_subtree == other.right_subtree and self.right_subtree == other.left_subtree):\n return True", "def __eq__(self, other):\n try:\n \"\"\"Checks if other is not None. A check other == None would result in an endless loop. Therefore, this\n try-except block is necessary.\"\"\"\n _ = other.tree\n except AttributeError:\n return False\n return self.tree == other.tree", "def deep_cmp(obj1, obj2):\n pass", "def __eq__(self, other):\n self = filter_tree(self, _remove_visit_meta)\n return super(Node, self).__eq__(filter_tree(other, _remove_visit_meta))", "def _exact_compare(tree1, tree2):\n attrs = ['name', 'length', 'support']\n for n1, n2 in zip(tree1.postorder(), tree2.postorder()):\n for attr in attrs:\n if getattr(n1, attr, None) != getattr(n2, attr, None):\n return False\n return True", "def __eq__(self, other):\n return type(self) == type(other) and self.node is other.node", "def __eq__(self, other: RBtree) -> bool:\n comp = lambda n1, n2: n1 == n2 and ((comp(n1.left, n2.left) and comp(n1.right, n2.right)) if (n1 and n2) else True)\n return comp(self.root, other.root) and self.size == other.size", "def __eq__(self, other):\n return other.left == self.left and other.right == self.right", "def test_deep_equals(obja, objb, isequal):\n\n objatree = wo.typedtree(obja)\n objbtree = wo.typedtree(objb)\n match = objatree == objbtree\n ok = match == isequal\n\n if ok:\n s = \"pass\"\n else:\n s = \"fail\"\n\n print(f\"{obja} == {objb} is {match} : {s}\")\n return ok", "def __eq__(self, other):\n if not isinstance(other, ActivityLogTreeNode):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, *args):\n return _ida_hexrays.treeloc_t___eq__(self, *args)", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self: 'RegexTree', other: object) -> bool:\n return (isinstance(other, RegexTree) and\n self.symbol == other.symbol and\n self.children == other.children)", "def _compare_elements(self, old, new):\n res = None\n # We want to go through the tree post-order\n if isinstance(old, dict):\n res_dict = self.compare_dicts(old, new)\n if (len(res_dict) > 0):\n res = res_dict\n # Now we are on the same level\n # different types, new value is new\n elif (type(old) != type(new)):\n res = {'---': old, '+++': new}\n # recursive arrays\n # we can be sure now, that both new and old are\n # of the same type\n elif (isinstance(old, list)):\n res_arr = self._compare_arrays(old, new)\n if (len(res_arr) > 0):\n res = res_arr\n # the only thing remaining are scalars\n else:\n scalar_diff = 
self._compare_scalars(old, new)\n if scalar_diff is not None:\n res = scalar_diff\n\n return res", "def __eq__(self, other):\n if not isinstance(other, Node):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n if not isinstance(other, Node):\n return False\n return self.data == other.data", "def test_equivalency(self):\n def compare_func(obj, node):\n # same id\n self.assertEqual(obj.id, node.get(\"id\"))\n\n # same html\n self.assertEqual(obj.html.prettify, node.prettify)\n\n # parents have same id (only for non-root elements)\n if not obj == self.document.root:\n self.assertEqual(obj.parent.id, node.parent.get(\"id\"))\n\n # same number of children\n child_nodes = self.get_children_of_node(node)\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # children have same ids\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n self.assertEqual(child_obj.id, child_node.get(\"id\"))\n\n self.recursively_compare_tree_against_html(compare_func)", "def __eq__(self, other):\n return self.left == other.left and self.right == other.right and self.left2 == other.left2 and self.right2 == other.right2", "def isSameTree(self, node1, node2):\n # print(\"isSameTree call for {} and {}\".format(node1.id, node2.id))\n\n if node1.id == node2.id:\n return True\n if node1.value == node2.value:\n # Compare children, in sorted order based on value\n node1Children = list(\n sorted(\n node1.neighbors,\n key=lambda node:\n node.value))\n node2Children = list(\n sorted(\n node2.neighbors,\n key=lambda node:\n node.value))\n\n if len(node1Children) == len(node2Children):\n # For identical trees, A list of nieghbors\n # in sorted (based on value) order:\n # Should have same length\n # At each position, values are same (verify recursively)\n for i in range(len(node1Children)):\n if not self.isSameTree(node1Children[i], node2Children[i]):\n return False\n # All neighbor pairs verified\n return True", "def __lt__(self, other):\n if self.tree is None:\n return True\n if other is None or other.tree is None:\n return False\n\n if len(self) >= len(other):\n return False\n\n if self.tree is not None and other.tree is not None:\n for x in self:\n if x not in other:\n return False\n return True\n return False", "def recursively_compare_tree_against_html(self, func):\n def inner(obj, node):\n # invoke comparator function\n func(obj=obj, node=node)\n\n # filter\n child_nodes = self.get_children_of_node(node)\n\n # same number of object children and html child nodes\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # loop over children and call recursive compare on them\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n inner(obj=child_obj, node=child_node)\n\n # call inner() with root elements\n inner(obj=self.document.root, node=self.soup.body)", "def __eq__(self, other):\n # check equality of the nodesets\n return self.nodeset.__eq__(other.get_nodeset())", "def compare_trees(tree1, tree2):\n for key in tree1.keys():\n print(key)\n assert key in tree2.keys()\n if isinstance(tree1[key], list):\n print(tree1[key])\n assert tree1[key] == tree2[key]\n else:\n print('Calling compare_trees recursively')\n compare_trees(tree1[key], tree2[key])", "def diff(self, other, match=lambda x: True, clean=False):\n result = {}\n\n def _iterativediff(t1, t2, subdir):\n \"\"\"compares two trees and appends new tree nodes to examine to\n the stack\"\"\"\n if t1 is None:\n t1 
= {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))\n\n stack = []\n _iterativediff(self._tree, other._tree, b'')\n while stack:\n subdir, t1, t2 = stack.pop()\n # stack is populated in the function call\n _iterativediff(t1, t2, subdir)\n\n return result", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n self.get_raw_value( object2 ) )", "def _node_equal(self, other):\n # We're not equal if other isn't a Node, or if other is a different class.\n if not isinstance(other, Node) or not isinstance(other, self.__class__):\n return False\n # Loop through all children, checking whether they are equal\n for self_child, other_child in zip(self.getChildren(), other.getChildren()):\n if not self_child == other_child:\n return False\n # If we get here, our two nodes much be equal\n return True", "def __eq__(self, other) -> bool:\n if not isinstance(other, self.__class__):\n return False\n\n if self.number_of_nodes() != other.number_of_nodes():\n return False\n if self.number_of_edges() != other.number_of_edges():\n return False\n\n if list(self.nodes) != list(other.nodes):\n return False\n\n # Compare node data.\n for i in self.nodes:\n # We may want to exclude the 'name' attribute from comparisons, assuming\n # it has no logical meaning.\n if self.nodes[i] != other.nodes[i]:\n return False\n\n if list(self.edges) != list(other.edges):\n return False\n\n for i, j in self.edges:\n # Compare edge data.\n if self.edges[i, j] != other.edges[i, j]:\n return False\n\n return True", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def test_consistency(self):\n def compare_func(obj, node):\n # number of children must be consistent\n self.assertEqual(\n len(obj.children),\n len(obj._children)\n )\n\n # obj.html equals node\n self.assertEqual(obj._html, node)\n\n # coordinates\n self.assertEqual(\n obj._coordinates,\n obj.coordinates,\n self.expected[\"coordinates\"][obj.id or \"document\"]\n )\n\n # confidence\n self.assertAlmostEqual(\n obj.confidence,\n self.expected[\"confidence\"][obj.id or \"document\"]\n )\n\n self.recursively_compare_tree_against_html(compare_func)", "def is_identical(self, tree1, tree2):\r\n if not tree1 and not tree2:\r\n return True\r\n elif tree1 and tree2:\r\n return (tree1.root == tree2.root and self.is_identical(tree1.left,tree2.left) and self.is_identical(tree1.right, tree2.right))\r\n 
else:\r\n return False", "def _compare_operations(self, root_a, root_b):\n\n # compare current root\n if root_a != root_b:\n return False\n\n # make sure root have the same number of children\n if root_a.child_count() != root_b.child_count():\n return False\n\n # recursively compare children\n for i in range(root_a.child_count()):\n if not self._compare_operations(root_a.children[i], root_b.children[i]):\n return False\n\n return True", "def __eq__(self, *args):\n return _ida_hexrays.ctree_items_t___eq__(self, *args)", "def cmp ( self, object1, object2 ):\n return cmp( object1[ self.index ], object2[ self.index ] )", "def __eq__(self, other):\n\n return (self.nodes[0].id == other.nodes[0].id) & \\\n (self.nodes[1].id == other.nodes[1].id) & \\\n (self.name == other.name)", "def __eq__(self, node):\n return (self.entry == node.entry)", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def isSameTree(self, p, q):\n # Initialize two queues for each tree with root nodes\n q1, q2 = deque([p]), deque([q])\n\n # Loop while q1 and q2 len(q1) == len(q2)\n while (q1 and q2) and (len(q1) == len(q2)):\n p1 = q1.popleft() # -> None\n p2 = q2.popleft() # -> None\n\n # Check for left and right child for each tree\n if (p1 and p2) and (p1.val == p2.val):\n # If the values are the same, put them in q1, q2\n q1.extend([p1.left, p1.right])\n q2.extend([p2.left, p2.right])\n elif (not p1 and not p2):\n continue\n else:\n # If the values are not the same, return False\n return False\n\n # Return True\n return True", "def __eq__(self, other: 'PriorityNode') -> bool:\n return self.priority == other.priority and self.value == other.value", "def compare(self, node) -> bool:\n\t\t# No conflicts, Return True\n\t\treturn True", "def __eq__(self, obj):\r\n return (self.position == obj.position and self.left_cont == obj.left_cont\r\n and self.line == obj.line and self.right_cont == obj.right_cont)", "def compare(obj_a, obj_b):\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.puzzle == other.puzzle and\n all([x in self.children for x in other.children]) and\n all([x in other.children for x in self.children]))", "def compare(self, node: NodeSubNodeHolder) -> bool:\n\t\tnode_names = self.get_node_names()\n\t\tsubnode_names = self.get_subnode_names()\n\n\t\tif node.name == self.name:\n\t\t\t# Compare Nodes\n\t\t\tfor new_node_i in range(len(node.nodes)):\n\t\t\t\tif node.nodes[new_node_i].name in node_names:\n\t\t\t\t\tfor node_i in range(len(self.nodes)):\n\t\t\t\t\t\tself.nodes[node_i].compare(node.nodes[new_node_i])\n\n\t\t\t# Compare SubNodes\n\t\t\tfor new_subnode_i in 
range(len(node.subnodes)):\n\t\t\t\tif node.subnodes[new_subnode_i] in subnode_names:\n\t\t\t\t\tfor subnode_i in range(len(self.subnodes)):\n\t\t\t\t\t\tself.subnodes[subnode_i].compare(node.subnodes[new_subnode_i])\n\n\t\t\t# Return False\n\t\t\treturn False\n\n\t\t# No conflicts, Return True\n\t\treturn True", "def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite", "def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.left == node.left and self.right == node.right", "def compare_topology(tree1, tree2):\n n2p1, n2p2 = ({node.name: node.parent.name\n for node in tree.traverse() if not node.is_root()}\n for tree in (tree1, tree2))\n return n2p1 == n2p2", "def __eq__(self, other_node):\n return self.state == other_node.state", "def __eq__(self, other):\n if not isinstance(other, NodeStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_trees(tree1, tree2):\n \tresponse = {}\n \tstart_time = time.time()\n \ttry:\t\n \t\ttns = dendropy.TaxonNamespace() \t\n \t\n \t\ttree_obj1 = dendropy.Tree.get(data=tree1, schema=\"newick\",taxon_namespace=tns)\n \t\ttree_obj2 = dendropy.Tree.get(data=tree2, schema=\"newick\",taxon_namespace=tns)\n\n \t\ttree_obj1.encode_bipartitions()\n \t\ttree_obj2.encode_bipartitions()\n\n \t\t#-----------------------------------------------------------\n \t\t#This method returns the symmetric distance between two trees. \n \t\t#The symmetric distance between two trees is the sum of the number of splits found in one of the trees but not the other. \n \t\t#It is common to see this statistic called the Robinson-Foulds distance\n\n \t\tareSame = True if treecompare.symmetric_difference(tree_obj1, tree_obj2) == 0 else False\n \t\tstatus = 200\n \t\tmessage = \"Success\"\n \t\tresponse['are_same_tree'] = areSame\n \n \texcept Exception, e:\n \t\tif \"Incomplete or improperly-terminated tree statement\" in str(e): #invalid: \"((A,B),C,D));\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderIncompleteTreeStatementError: \" + str(e)\n \t \t\tstatus = 400\n \t\telif \"Unbalanced parentheses at tree statement\" in str(e): #invalid: \"((A,B),(C,D);\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderMalformedStatementError: \"+str(e) \n \t \t\tstatus = 400\n \t\telif \"Multiple occurrences of the same taxa\" in str(e): #invalid: \"((A,B),(C,C));\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"NewickReaderDuplicateTaxonError: \"+str(e)\n \t \t\tstatus = 400\n \t\telif \"Unexpected end of stream\" in str(e): # invalid: \"((A,B),(C,D))\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"UnexpectedEndOfStreamError: \"+str(e)\n \t \t\tstatus = 400\n \t\telse:\n \t\t\tmessage = \"Error: Failed to compare trees. 
\"+str(e)\n \t \t\tstatus = 500\n \t \t\n \tresponse['status_code'] = status\n \tresponse['message'] = message\n\n \tend_time = time.time()\n \texecution_time = end_time-start_time\n #service result creation time\n \tcreation_time = datetime.datetime.now().isoformat()\n \tmeta_data = {'creation_time': creation_time, 'execution_time': float('{:4.2f}'.format(execution_time)), 'source_urls':[\"http://dendropy.org/library/treecompare.html#module-dendropy.calculate.treecompare\"] }\n\n \tresponse['meta_data'] = meta_data\n \tprint response\n \treturn response", "def __eq__(self, other):\n if not isinstance(other, Node):\n return NotImplemented\n return self.state == other.state", "def __lt__(self, *args):\n return _ida_hexrays.treeloc_t___lt__(self, *args)", "def __gt__(self, other: 'MinNode') -> bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def __eq__(self, other):\n return self.element() == other.element()", "def __eq__(self, other):\r\n\r\n return type(self) == type(other) and self.ttype == other.ttype", "def test_cmp(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n c1 = treantclass('a')\n c2 = treantclass('b')\n c3 = treantclass('c')\n\n assert sorted([c3, c2, c1]) == [c1, c2, c3]\n assert c1 <= c2 < c3\n assert c3 >= c2 > c1", "def __compare(self, key=None, method='compare', source=None, print_path=False):\r\n compare_node = source if source else self.root\r\n parent_node = None\r\n\r\n while compare_node.key:\r\n parent_node = compare_node\r\n\r\n # method search\r\n if method == 'search':\r\n if parent_node.key == key:\r\n # when the method is search, compare_node is the result\r\n parent_node = parent_node.parent\r\n break\r\n\r\n # for method compare and search\r\n if method == 'compare' or method == 'search':\r\n compare_node = parent_node.left_child if key <= parent_node.key else parent_node.right_child\r\n\r\n # method min\r\n if method == 'min':\r\n compare_node = parent_node.left_child\r\n\r\n # method max\r\n if method == 'max':\r\n compare_node = parent_node.right_child\r\n\r\n if print_path:\r\n try:\r\n root_string = \"(root)\" if not parent_node.parent else \"\"\r\n print(root_string + \"({}, {}, {}) -> ({}, {}, {})\".format(\r\n parent_node.key, parent_node.value, parent_node.get_color(),\r\n compare_node.key, compare_node.value, compare_node.get_color()))\r\n except AttributeError:\r\n pass\r\n\r\n return parent_node, compare_node", "def is_same_branch(self, other):\n if self.id == other.id:\n return True\n elif self.is_descendant_of(other) or other.is_descendant_of(self):\n return True\n else:\n return False", "def isSameTree(self, p: TreeNode, q: TreeNode) -> None:\n # if both are null -> return true\n if not p and not q:\n return True\n\n # if only one of them is null -> False\n if not p or not q: \n return False\n \n # if value is different -> False\n if p.val != q.val:\n return False \n\n return self.isSameTree(p.right, q.right) and self.isSameTree(p.left, q.left)", "def compareNodes(x, y):\n return x.pathValue - y.pathValue", "def __eq__(self, other):\n if not isinstance(other, NodeProperties):\n return False\n\n return self.__dict__ == other.__dict__", "def __cmp__(self, other):\n\t\tif isinstance(other, BaseType):\n\t\t\treturn cmp(self.level, other.level) or cmp(self.name, other.name)\n\t\telse:\n\t\t\treturn 1", "def __eq__(self, other):\n try:\n return (self.tag == other.tag and self.attributes == other.attributes)\n except:\n return False", "def __eq__(self, other):\n if not 
isinstance(other, Branchrestriction):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # check equality of names and attributes as well as that of the incident Node objects\n return \\\n self.weight == other.get_weight() and \\\n self.attributes.__eq__(other.get_attributes()) and \\\n self.get_incident_nodes().__eq__(other.get_incident_nodes())", "def compare(self, subnode) -> bool:\n\t\t# OK the node if it has a different name.\n\t\tif subnode.name != self.name:\n\t\t\treturn True\n\t\t# Alter self if incorrect type\n\t\tself._get_true_type(subnode.get_typestring())\n\t\t# Add filenames\n\t\tif subnode.name == \"File\":\n\t\t\tself.filenames.update(subnode.filenames)", "def compare(self,node, new_node):\n if new_node.get_value() == node.get_value():\n return 0\n elif new_node.get_value() < node.get_value():\n return -1\n else:\n return 1", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __gt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value < other.value\n return self.priority < other.priority", "def compare(self, other):\n # all redefinitions of __cmp__ method should start with the\n # following lines:\n if self is other:\n return 0\n n1 = self.__class__\n n2 = other.__class__\n c = _old_compare(n1, n2)\n if c:\n return c\n #\n st = self._hashable_content()\n ot = other._hashable_content()\n c = (len(st) > len(ot)) - (len(st) < len(ot))\n if c:\n return c\n for l, r in zip(st, ot):\n l = Basic(*l) if isinstance(l, frozenset) else l\n r = Basic(*r) if isinstance(r, frozenset) else r\n if isinstance(l, Basic):\n c = l.compare(r)\n else:\n c = (l > r) - (l < r)\n if c:\n return c\n return 0", "def __gt__ (self, other) :\n return other.__lt__(self)", "def compare_tree(self):\n result = []\n \n pathA = os.path.join(self.testpath,'A')\n pathB = os.path.join(self.testpath,'B')\n\n filesA = [os.path.relpath(f,pathA) for f in self.tree(pathA)]\n filesB = [os.path.relpath(f,pathB) for f in self.tree(pathB)]\n\n filesAB = set(filesA).union(filesB)\n for fileAB in sorted(list(filesAB)):\n\n fileA = os.path.join(self.testpath,'A',fileAB)\n fileB = os.path.join(self.testpath,'B',fileAB)\n try:\n fileAtxt = open(fileA).read()\n except IOError:\n result.append( ('missing_inA',fileAB) )\n continue\n \n try:\n fileBtxt = open(fileB).read()\n except IOError:\n result.append( ('missing_inB',fileAB) )\n continue\n\n if not fileAtxt == fileBtxt:\n result.append( ('disagree',fileAB))\n \n return result", "def compare(self, node, new_node):\n if new_node.get_value() == node.get_value():\n return 0\n elif new_node.get_value() < node.get_value():\n return -1 # traverse left\n else: # new_node > node\n return 1 # traverse right", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __lt__(self, other):\n return self._obj_func() < other._obj_func()", "def is_same(self: _R, other: _R) -> bool:\n children = [i.render() for i in self.children]\n other_children = [i.render() for i in other.children]\n return other_children == children", "def __lt__(self, other: 'MaxNode') -> bool:\n if self.priority == other.priority:\n return self.value > other.value\n return self.priority > other.priority", "def __cmp__(self, other_code_object):\n # If our 'compare' method returns anything there are differences\n if self.compare(other_code_object):\n return True\n else:\n return False", "def 
__ge__(self, other: object) -> bool:\n ...", "def __ge__(self, other: object) -> bool:\n ...", "def __eq__(self, other):\n if not isinstance(other, StateSyncNode):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __cmp__(self, other):\n return cmp(repr(self), repr(other))", "def __eq__(self, other):\n if not isinstance(other, MtreeIntegratedUsage):\n return False\n\n return self.to_dict() == other.to_dict()", "def __lt__(self,other):\n return self.lvl < other.lvl", "def __eq__(self, another_node):\n return Node.state_as_string(self.state) == Node.state_as_string(another_node.state)", "def _cmp_(self, other):\n return cmp(self.matrix(), other.matrix())", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def _iterativediff(t1, t2, subdir):\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))", "def __gt__(self, other):\n return self.element() > other.element()", "def compare_balanced_tree(G, node1:str, node2:str, traversed1:list, traversed2:list):\n logger.debug(f\"checking symmtrical connections for nodes: {node1}, {node2}\")\n tree1 = set(get_next_level(G,[node1]))\n tree2 = set(get_next_level(G,[node2]))\n traversed1.append(node1)\n traversed2.append(node2)\n if tree1==tree2:\n return True\n while(len(list(tree1))== len(list(tree2)) > 0):\n logger.debug(f\"tree1 {tree1} tree2 {tree2} traversed1 {traversed1} traversed2 {traversed2}\")\n tree1 = set(tree1) - set(traversed1)\n tree2 = set(tree2) - set(traversed2)\n\n if tree1.intersection(tree2) or len(list(tree1))== len(list(tree2))==0:\n return True\n else:\n traversed1+=list(tree1)\n traversed2+=list(tree2)\n tree1=set(get_next_level(G,tree1))\n tree2=set(get_next_level(G,tree2))\n\n logger.debug(f\"Non symmetrical branches for nets: {node1}, {node2}\")\n return False", "def __eq__( self, other ):\n return self.data == other.data", "def __eq__(self, other):\n pass", "def __eq__(self, other):\n pass" ]
[ "0.7647767", "0.75716287", "0.73822033", "0.72987217", "0.71460176", "0.70908886", "0.6981702", "0.6969616", "0.6951032", "0.69436485", "0.6897531", "0.68732524", "0.6837008", "0.6833025", "0.6833025", "0.6833025", "0.6833025", "0.6833025", "0.6814338", "0.6804265", "0.6780416", "0.6729602", "0.67159766", "0.6682937", "0.6674231", "0.6643086", "0.66397655", "0.662943", "0.65995175", "0.6598597", "0.65675026", "0.65593886", "0.6543836", "0.6530083", "0.6506146", "0.6506146", "0.6504614", "0.64417654", "0.64356166", "0.6429579", "0.6406121", "0.63914394", "0.63785297", "0.6371664", "0.636331", "0.63562", "0.6338435", "0.631219", "0.6307216", "0.62872994", "0.6259908", "0.62595254", "0.6259025", "0.62295216", "0.6227364", "0.6213094", "0.6178555", "0.61606175", "0.6159509", "0.61563045", "0.6152331", "0.61400014", "0.61281997", "0.6126146", "0.6114311", "0.61096543", "0.61023295", "0.6062364", "0.60600716", "0.6056564", "0.60478765", "0.60463256", "0.6035427", "0.6032895", "0.6027445", "0.60252106", "0.602005", "0.60141504", "0.60118604", "0.60101336", "0.6009309", "0.59978014", "0.5995174", "0.59904575", "0.59885466", "0.59875476", "0.59875476", "0.59812784", "0.5980604", "0.59745204", "0.59608686", "0.5950159", "0.59479356", "0.59419036", "0.5941889", "0.5935398", "0.5927167", "0.592561", "0.5923678", "0.5921796", "0.5921796" ]
0.0
-1
Tree object equality test.
def __eq__(self, other):
    if hasattr(other, 'vector'):
        return self.vector == other.vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other: 'Tree') ->bool:\n return (type(self) is type(other) and\n self.value == other.value and\n self.children == other.children)", "def test_deep_equals(obja, objb, isequal):\n\n objatree = wo.typedtree(obja)\n objbtree = wo.typedtree(objb)\n match = objatree == objbtree\n ok = match == isequal\n\n if ok:\n s = \"pass\"\n else:\n s = \"fail\"\n\n print(f\"{obja} == {objb} is {match} : {s}\")\n return ok", "def __eq__(self, other):\n if not isinstance(other, Tree):\n raise ValueError('Cannot compare objects.')\n\n if self.is_leaf():\n if other.is_leaf():\n return True\n else:\n return False\n\n if other.is_leaf():\n return False\n\n if (self.left_subtree == other.left_subtree and self.right_subtree == other.right_subtree) \\\n or (self.left_subtree == other.right_subtree and self.right_subtree == other.left_subtree):\n return True", "def __eq__(self, other):\n try:\n \"\"\"Checks if other is not None. A check other == None would result in an endless loop. Therefore, this\n try-except block is necessary.\"\"\"\n _ = other.tree\n except AttributeError:\n return False\n return self.tree == other.tree", "def __eq__(self, other):\n return type(self) == type(other) and self.node is other.node", "def __eq__(self, other):\n return other.left == self.left and other.right == self.right", "def __eq__(self, other):\n if not isinstance(other, Node):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: 'RegexTree', other: object) -> bool:\n return (isinstance(other, RegexTree) and\n self.symbol == other.symbol and\n self.children == other.children)", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other: RBtree) -> bool:\n comp = lambda n1, n2: n1 == n2 and ((comp(n1.left, n2.left) and comp(n1.right, n2.right)) if (n1 and n2) else True)\n return comp(self.root, other.root) and self.size == other.size", "def __eq__(self, *args):\n return _ida_hexrays.treeloc_t___eq__(self, *args)", "def __eq__(self, other):\n self = filter_tree(self, _remove_visit_meta)\n return super(Node, self).__eq__(filter_tree(other, _remove_visit_meta))", "def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.left == node.left and self.right == node.right", "def __eq__(self, node):\n return (self.entry == node.entry)", "def __eq__(self, other):\n if not isinstance(other, Node):\n return False\n return self.data == other.data", "def __eq__(self, other):\n return self.left == other.left and self.right == other.right and self.left2 == other.left2 and self.right2 == other.right2", "def __eq__(self, obj):\r\n return (self.position == obj.position and self.left_cont == obj.left_cont\r\n and self.line == obj.line and self.right_cont == obj.right_cont)", "def test_equivalency(self):\n def compare_func(obj, node):\n # same id\n self.assertEqual(obj.id, node.get(\"id\"))\n\n # same html\n self.assertEqual(obj.html.prettify, node.prettify)\n\n # parents have same id (only for non-root elements)\n if not obj == self.document.root:\n 
self.assertEqual(obj.parent.id, node.parent.get(\"id\"))\n\n # same number of children\n child_nodes = self.get_children_of_node(node)\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # children have same ids\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n self.assertEqual(child_obj.id, child_node.get(\"id\"))\n\n self.recursively_compare_tree_against_html(compare_func)", "def __eq__(self, other):\n if not isinstance(other, ActivityLogTreeNode):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # check equality of the nodesets\n return self.nodeset.__eq__(other.get_nodeset())", "def is_identical(self, tree1, tree2):\r\n if not tree1 and not tree2:\r\n return True\r\n elif tree1 and tree2:\r\n return (tree1.root == tree2.root and self.is_identical(tree1.left,tree2.left) and self.is_identical(tree1.right, tree2.right))\r\n else:\r\n return False", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def test_equals(self):\n parameters = [\n (1, 'a', False),\n (1, None, False),\n (1, 2, False),\n (1, 1, True)\n ]\n for pair in parameters:\n with self.subTest(pair=pair):\n self.getLogger().info('Next pair %s', pair)\n _obj1 = Node(pair[0])\n _obj2 = None if not pair[1] else Node(pair[1])\n self.assertEqual(_obj1._equals(_obj2), pair[2])\n _objSelf = Node(1)\n self.assertTrue(_objSelf._equals(_objSelf))", "def __eq__(self, other):\n return (type(self) == type(other) and\n self.puzzle == other.puzzle and\n all([x in self.children for x in other.children]) and\n all([x in other.children for x in self.children]))", "def __eq__(self, other_node):\n return self.state == other_node.state", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n # check equality of names since names are unique identifiers of nodes\n return self.name.__eq__(other.get_name())", "def __eq__(self, other):\n if not isinstance(other, StateSyncNode):\n return False\n\n return self.__dict__ == other.__dict__", "def _node_equal(self, other):\n # We're not equal if other isn't a Node, or if other is a different class.\n if not isinstance(other, Node) or not isinstance(other, self.__class__):\n return False\n # Loop through all children, checking whether they are equal\n for self_child, other_child in zip(self.getChildren(), other.getChildren()):\n if not self_child == other_child:\n return False\n # If we get here, our two nodes much be equal\n return True", "def __eq__(self, other):\n if not isinstance(other, Node):\n return NotImplemented\n return self.state == other.state", "def __eq__(self, obj):\r\n return assert_(self.obj == obj, '%r != %r' % (self.obj, obj))", "def __eq__(self, other):\n if other is None or not isinstance(other, IDLNode):\n return 1\n return self.__dict__.__eq__(other.__dict__)", "def __eq__(self, *args):\n return _ida_hexrays.ctree_items_t___eq__(self, *args)", "def __eq__(self, other):\n if not isinstance(other, NodeProperties):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, node):\n if node == None or self.element != node.element:\n return False\n return self.index == node.index", "def eq(self, other: Any) -> bool:\n # TODO: Rasswanth: Fix later after the comparison operation\n # 
relative\n # from .... import Tensor\n\n # if (\n # isinstance(self.child, Tensor)\n # and isinstance(other.child, Tensor)\n # and (self.child != other.child).child.any() # type: ignore\n # ):\n # return False\n\n # if (\n # isinstance(self.child, np.ndarray)\n # and isinstance(other.child, np.ndarray)\n # and (self.child != other.child).any()\n # ):\n # return False\n\n # if self.rank != other.rank:\n # return False\n\n # if self.ring_size != other.ring_size:\n # return False\n\n # if self.nr_parties != other.nr_parties:\n # return False\n\n # return True\n\n # ATTENTION: Why are we getting here now when we never did before?\n if not hasattr(other, \"child\"):\n return self.child == other\n\n return self.child == other.child", "def __eq__(self, other) -> bool:\n if not isinstance(other, self.__class__):\n return False\n\n if self.number_of_nodes() != other.number_of_nodes():\n return False\n if self.number_of_edges() != other.number_of_edges():\n return False\n\n if list(self.nodes) != list(other.nodes):\n return False\n\n # Compare node data.\n for i in self.nodes:\n # We may want to exclude the 'name' attribute from comparisons, assuming\n # it has no logical meaning.\n if self.nodes[i] != other.nodes[i]:\n return False\n\n if list(self.edges) != list(other.edges):\n return False\n\n for i, j in self.edges:\n # Compare edge data.\n if self.edges[i, j] != other.edges[i, j]:\n return False\n\n return True", "def __eq__(self, other):\n if self is other:\n return True\n elif type(self) != type(other):\n return False\n else:\n # A node is considered equal if it has the exact same state as\n # another node\n if self.board_state == other.board_state:\n return True\n else:\n return False", "def __eq__(self, other):\n\n return (self.nodes[0].id == other.nodes[0].id) & \\\n (self.nodes[1].id == other.nodes[1].id) & \\\n (self.name == other.name)", "def __eq__(self, another_node):\n return Node.state_as_string(self.state) == Node.state_as_string(another_node.state)", "def __eq__(self, other):\n return self.key == other.key \\\n and self.get_inside() == other.get_inside() \\\n and self.get_outside() == other.get_outside()", "def __eq__(self, other):\n return self.element() == other.element()", "def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite", "def __eq__(self, other):\n return equal(self, other)", "def isSameTree(self, node1, node2):\n # print(\"isSameTree call for {} and {}\".format(node1.id, node2.id))\n\n if node1.id == node2.id:\n return True\n if node1.value == node2.value:\n # Compare children, in sorted order based on value\n node1Children = list(\n sorted(\n node1.neighbors,\n key=lambda node:\n node.value))\n node2Children = list(\n sorted(\n node2.neighbors,\n key=lambda node:\n node.value))\n\n if len(node1Children) == len(node2Children):\n # For identical trees, A list of nieghbors\n # in sorted (based on value) order:\n # Should have same length\n # At each position, values are same (verify recursively)\n for i in range(len(node1Children)):\n if not self.isSameTree(node1Children[i], node2Children[i]):\n return False\n # All neighbor pairs verified\n return True", "def __eq__(self,other):\n return self is other", "def testEquality(self):\n pass", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return 
self.__dict__ == other", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__" ]
[ "0.81907886", "0.8065539", "0.79538393", "0.78520125", "0.7722757", "0.7619981", "0.75722384", "0.7570807", "0.7570623", "0.7570623", "0.7570623", "0.7570623", "0.7570623", "0.7509258", "0.7502789", "0.7492585", "0.74790883", "0.74709064", "0.7460193", "0.7412015", "0.7403098", "0.7324884", "0.7270347", "0.7257898", "0.7229106", "0.72258806", "0.71789765", "0.71540457", "0.71303034", "0.7117659", "0.70764536", "0.70637673", "0.70484215", "0.70484215", "0.70267344", "0.6998169", "0.6985585", "0.697855", "0.69661933", "0.69378203", "0.69274306", "0.6920368", "0.6911975", "0.69099677", "0.68975836", "0.68898714", "0.6882722", "0.68791586", "0.6870801", "0.68630487", "0.686042", "0.6857629", "0.68567055", "0.6856008", "0.6851829", "0.68397075", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197", "0.6834197" ]
0.0
-1
Tree object non-equality test.
def __ne__(self, other):
    if hasattr(other, 'vector'):
        return self.vector != other.vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, obj):\r\n return assert_(self.obj != obj, '%r == %r' % (self.obj, obj))", "def __eq__(self, other):\n self = filter_tree(self, _remove_visit_meta)\n return super(Node, self).__eq__(filter_tree(other, _remove_visit_meta))", "def __ne__(self, obj):\n return not self.__eq__(obj)", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __eq__(self, other):\n try:\n \"\"\"Checks if other is not None. A check other == None would result in an endless loop. Therefore, this\n try-except block is necessary.\"\"\"\n _ = other.tree\n except AttributeError:\n return False\n return self.tree == other.tree", "def __ne__(self: _TT, other: object) -> bool:\n return self.ne(other) # type: ignore", "def __ne__(self,other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other) # opposite of __eq__", "def __ne__(self,other):\n return not self == other", "def __ne__(self, other):\n return not(self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __eq__(self, other: 'Tree') ->bool:\n return (type(self) is type(other) and\n self.value == other.value and\n self.children == other.children)", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, G):\n return not self.__eq__(G)", "def __ne__(self, other):\n return not (self == other) # opposite of __eq__", "def __ne__(self, other):\r\n return not (self == other)", "def __ne__(self, other):\n return not_equal(self, other)", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __neq__(self, other): \n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def 
__ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other" ]
[ "0.7296645", "0.7240735", "0.7235913", "0.7218751", "0.71460253", "0.7126969", "0.7104418", "0.7090795", "0.7070545", "0.703232", "0.70297754", "0.70278966", "0.70278966", "0.70278966", "0.7015042", "0.6992102", "0.6992102", "0.6984518", "0.69703865", "0.6957141", "0.6948727", "0.69455075", "0.69333154", "0.69333154", "0.69333154", "0.69333154", "0.69333154", "0.69333154", "0.69333154", "0.69333154", "0.69333154", "0.6930596", "0.6929878", "0.6929878", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002", "0.6912002" ]
0.0
-1
Runs a forest optimisation algorithm.
def run(self, verbose=False):
    cost = {}; cost["best"] = []; cost["mean"] = []

    for i in range(self.max_iters):

        # prints out information at current cycle
        if verbose:
            print("Iteration: {}".format(i),
                  "Fitness: {}".format(self.forest[0][0]))

        # reproduction phase
        self.reproduce()

        # seed dispersal phase
        self.seedlings = []
        for tree in self.population:
            self.disperse(tree[1])
            tree[1].year += 1

        # selection phase
        self.select()

        # decays exploration parameters
        if (self.epsilon > 0):
            self.epsilon -= self.epsilon_decay

        # stores statistics and updates counter of iterations
        cost["best"].append(self.population[0][0])
        cost["mean"].append( sum( [ tree[0] for tree in self.population ] )\
            / len(self.population) )
        self.iteration += 1

    return cost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)", "def execute(self):\n process_list = []\n forests_queue = Queue(self.power)\n iterational = 0\n print '| |-starting evaluation, training and validation'\n for one_forest in self._forests:\n process_list.append(\n Process(target=main_async_method,\n args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings)))\n iterational += 1\n for proc in process_list:\n proc.start()\n for proc in process_list:\n proc.join()\n for smth in range(forests_queue.qsize()):\n tmp = forests_queue.get()\n self._forests[tmp['place']].fitness = tmp['fitness']\n fitness_summ = sum(map(lambda forest: forest.fitness, self._forests))\n fss = map(lambda x: x.fitness, self._forests)\n print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss)\n self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)", "def growForest(config, load_exp_file=True):\n\n silent = config.get('silent', False)\n experiment_Path = r\"C:\\Users\\user\\Desktop\\Prediction_model\\experiment\\flood.exp\"\n\n if load_exp_file:\n #loadExperimentFile(config, filename=config.exp)\n loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n\n forests = []\n results = []\n\n\n # do multiple runs if needed. 
note that we start at config.run, not zero\n for run in range(config.num_runs):\n training_graphs, testing_graphs = splitDict(config.graphs, int(len(config.graphs) * .8), random=True)\n\n \"\"\"\n # perform under-sampling if needed\n if hasattr(config, 'underlabel'):\n under_graphs = {}\n skip_count = 0\n for k in training_graphs.keys():\n if training_graphs[k].class_label == config.underlabel and random.random() <= config.underval:\n skip_count += 1\n else:\n under_graphs[k] = training_graphs[k]\n print('Undersampled ' + str(skip_count) + ' graphs')\n training_graphs = under_graphs\n \"\"\"\n # print out some useful info on the class distribution\n counts = defaultdict(int)\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('training:', len(training_graphs), counts)\n\n counts = defaultdict(int)\n for graph in testing_graphs.values():\n counts[graph.class_label] += 1\n print('testing:', len(testing_graphs), counts)\n\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('total:', len(config.graphs), counts)\n\n print('\\nrun:', run)\n config.run = run\n\n srrf = SRRForest(config)\n #srrf.growForest(training_graphs)\n srrf.growForest(config.graphs)\n forests.append(srrf)\n #srrf.training_graph_ids = list(training_graphs.keys())\n #training_labeling = srrf.labelGraphs(training_graphs,config.time_list)\n #outOfBagLabels=srrf.getOutOfBagLabels()\n #print(\"outOfBagLabels\")\n #print(outOfBagLabels)\n #c=srrf.compute_oob_score(training_graphs, outOfBagLabels)\n #print(\"concordance index:\")\n #print(c)\n config.saveTrees(srrf)\n\n #results.append(c)\n\n\n\n\n \"\"\"\n\n df = pd.DataFrame(columns=['lon', 'lat', 'survival_probability', 'time'])\n\n\n srrf.testing_graph_ids = testing_graphs.keys()\n testing_labeling = srrf.labelGraphs(testing_graphs,config.time_list)\n\n\n\n\n\n\n\n for i,h in testing_labeling.items():\n\n lat = i.graph.attributes_by_type.get(('cell', 'lat'))[0].value\n lon = i.graph.attributes_by_type.get(('cell', 'lon'))[0].value\n for t, label in h.items():\n df = df.append(\n {'lon': lon, 'lat': lat, 'survival_probability': label[1], 'time': t},\n ignore_index=True)\n\n sort_by_time = df.sort_values('time')\n print(sort_by_time.head())\n import plotly.express as px\n fig = px.scatter_mapbox(sort_by_time, lat=\"lat\", lon=\"lon\", hover_data=[\"survival_probability\"],\n color=\"survival_probability\", animation_frame=\"time\", animation_group=\"time\",\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10, height=500)\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig.show()\n \"\"\"\n\n\n\n #config.saveTrees((srrf,)) ###config.saveTree is giving us an eror type error: unable to pickle dict keys.\n\n #print('numruns: %s' % (config.num_runs))\n #print(results)\n\n\n #return results", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx 
= _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def forest(self):\n\n forest_parameters = [{'n_estimators': hel.powerlist(10, 2, 4),\n 'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1], 'n_jobs': [-1]}]\n forest_grid = GridSearchCV(estimator=RandomForestRegressor(),\n param_grid=forest_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n forest_grid_result = forest_grid.fit(self.X_train, self.y_train)\n best_forest_parameters = forest_grid_result.best_params_\n forest_score = forest_grid_result.best_score_\n print('Best forest params: ' + str(best_forest_parameters))\n print('Forest score: ' + str(forest_score))\n return RandomForestRegressor(\n n_estimators=best_forest_parameters['n_estimators'],\n min_samples_leaf=best_forest_parameters['min_samples_leaf'],\n criterion=best_forest_parameters['criterion'],\n random_state=1, n_jobs=-1)", "def forest(input_layer,\r\n var_prediction,\r\n var_explanatory,\r\n trees,\r\n max_tree_depth=None,\r\n random_vars=None,\r\n sample_size=100,\r\n min_leaf_size=None,\r\n prediction_type=\"train\",\r\n features_to_predict=None,\r\n validation=10,\r\n importance_tbl=False,\r\n exp_var_matching=None,\r\n output_name=None,\r\n gis=None):\r\n allowed_prediction_types = {\r\n 'train' : \"Train\",\r\n 'trainandpredict' : 'TrainAndPredict'\r\n\r\n }\r\n if str(prediction_type).lower() not in allowed_prediction_types:\r\n raise ValueError(\"Invalid Prediction type.\")\r\n else:\r\n prediction_type = allowed_prediction_types[prediction_type.lower()]\r\n\r\n kwargs=locals()\r\n\r\n gis=_arcgis.env.active_gis if gis is None else gis\r\n\r\n if gis.version < [7]:\r\n return None\r\n url=gis.properties.helperServices.geoanalytics.url\r\n\r\n params={}\r\n for key, value in kwargs.items():\r\n if value is not None:\r\n params[key]=value\r\n\r\n if output_name is None:\r\n output_service_name='Forest Based Regression_' + _id_generator()\r\n output_name=output_service_name.replace(' ', '_')\r\n else:\r\n output_service_name=output_name.replace(' ', '_')\r\n\r\n output_service=_create_output_service(gis, output_name, output_service_name, 'Forest Based Classification And Regression')\r\n\r\n params['output_name'] = _json.dumps({\r\n \"serviceProperties\": {\"name\" : output_name, \"serviceUrl\" : output_service.url},\r\n \"itemProperties\": {\"itemId\" : output_service.itemid}})\r\n\r\n\r\n _set_context(params)\r\n\r\n\r\n\r\n param_db={\r\n \"input_layer\": (_FeatureSet, \"inFeatures\"),\r\n \"prediction_type\" : (str, \"predictionType\"),\r\n \"features_to_predict\" : (_FeatureSet, \"featuresToPredict\"),\r\n \"var_prediction\" : (dict, \"variablePredict\"),\r\n \"var_explanatory\" : (list, \"explanatoryVariables\"),\r\n \"exp_var_matching\" : (list, \"explanatoryVariableMatching\"),\r\n \"return_importance_table\" : (bool, \"returnVariableOfImportanceTable\"),\r\n \"trees\" : (int, \"numberOfTrees\"),\r\n \"max_tree_depth\" : (int, \"maximumTreeDepth\"),\r\n \"min_leaf_size\" : (int, \"minimumLeafSize\"),\r\n \"sample_size\" : (int, \"sampleSize\"),\r\n \"random_vars\" : (int, \"randomVariables\"),\r\n \"validation\" : (float, \"percentageForValidation\"),\r\n \"output_name\" : (str, \"outputTrainedName\"),\r\n \"context\": (str, \"context\"),\r\n \"importance_tbl\" : (bool, \"createVariableOfImportanceTable\"),\r\n \"output_trained\": (_FeatureSet, 
\"outputTrained\"),\r\n \"output_predicted\": (_FeatureSet, \"outputPredicted\"),\r\n \"variable_of_importance\": (_FeatureSet, \"variableOfImportance\"),\r\n }\r\n return_values=[\r\n {\"name\": 'output_trained', \"display_name\": \"Output Features\", \"type\": _FeatureSet},\r\n {\"name\" : \"output_predicted\", \"display_name\" : \"Output Predicted\", \"type\" : _FeatureSet},\r\n {\"name\" : \"variable_of_importance\", \"display_name\" : \"Variable of Importance\", \"type\" : _FeatureSet}\r\n ]\r\n if features_to_predict is None and prediction_type == 'TrainAndPredict':\r\n kwargs[\"features_to_predict\"] = input_layer\r\n #param_db.pop(\"features_to_predict\")\r\n try:\r\n res = _execute_gp_tool(gis, \"ForestBasedClassificationAndRegression\", params, param_db, return_values, _use_async, url, True)\r\n return output_service\r\n except:\r\n output_service.delete()\r\n raise\r\n\r\n return", "def mutate(self):\n for forest in self._forests:\n forest.mutate(self._fullInput)", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def tuneRandomForest(train_set):\n\n auc_score = make_scorer(roc_auc_score)\n acc = make_scorer(accuracy_score)\n\n train_set = pd.read_csv(train_set, sep=\"\\t\", low_memory=False)\n\n train_output = train_set[\"output\"].values\n train_features = train_set[train_set.columns.drop([\"labels\", \"output\"])].values\n\n #X_train, X_test, y_train, y_test = train_test_split(train_features, train_output, test_size=0.20)\n\n # define parameters to be optimized\n parameters = {\n 'n_estimators': [int(x) for x in range(200, 3000, 300)],\n 'max_features': ['log2', 'sqrt', \"auto\"],\n 'criterion': [\"gini\", \"entropy\"],\n }\n #plotGrid(parameters, script_path + \"/results/GridSearchPlot.png\")\n\n scores = ['precision', 'recall', 'f1', auc_score, acc] # compute efficiency based on scores\n for score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n\n tune_search = GridSearchCV(\n RandomForestClassifier(n_jobs=-1),\n parameters,\n scoring=score\n )\n #tune_search.fit(X_train, y_train)\n tune_search.fit(train_features, train_output)\n print(tune_search.best_params_)\n\n means = tune_search.cv_results_['mean_test_score']\n stds = 
tune_search.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, tune_search.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n\n #y_true, y_pred = y_test, tune_search.predict(X_test)\n # print(classification_report(y_true, y_pred))\n #print()", "def runner(self):\n\n print('[ INFO ]: Initializing the forest fires program runner...')\n\n df, features, predictor = self.preprocess()", "def test_forest_dml(self):\n\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedForestDML(model_y=automl_model_reg(),\n model_t=GradientBoostingClassifier(),\n discrete_treatment=True,\n n_estimators=1000,\n subsample_fr=.8,\n min_samples_leaf=10,\n min_impurity_decrease=0.001,\n verbose=0, min_weight_fraction_leaf=.01)\n est.fit(Y, T, X=X)\n _ = est.effect(X)", "def run_isolation_forest(file_path):\n\n features_list = ['Direction', 'Speed']\n df_train = pd.read_csv(f'{file_path}/without_anom.csv')\n\n df_train = df_train[features_list]\n\n scalar = MaxAbsScaler()\n\n X_train = scalar.fit_transform(df_train)\n\n random_model = MultiOutputRegressor(\n RandomForestRegressor(max_depth=2, max_features=\"sqrt\")\n )\n\n # lab_enc = preprocessing.LabelEncoder()\n # training_scores_encoded = lab_enc.fit_transform(X_train)\n random_model.fit(X_train, X_train)\n pred = random_model.predict(X_train)\n # isolation_model = MultiOutputRegressor(IsolationForest()).fit(X_train)\n # pred = isolation_model.predict(X_train)\n test_path = \"C:\\\\Users\\\\Yehuda Pashay\\\\Desktop\\\\fligth_data\\\\data_set\\\\test\\\\chicago_to_guadalajara\\\\down_attack\"\n df_test = pd.read_csv(f'{test_path}/sensors_8.csv')\n df_test = df_test[features_list]\n\n Y_test = scalar.transform(df_test)\n test_pred = random_model.predict(Y_test)\n a = 4", "def run_iso_forest_test(args):\n test_file, outdir = args\n start = time.time()\n n_estimators = config.iso_forest.n_estimators\n max_samples = 'auto'\n contamination = config.iso_forest.contamination\n max_features = 1.0 # default is 1.0 (use all features)\n bootstrap = False\n outfile_path1, outfile_path2 = FileName.get_iso_forest_rst_name()\n run_iso_forest(test_file, rs, n_estimators, max_samples, contamination,\n max_features, bootstrap, outfile_path1, outfile_path2)\n dt = time.time() - start\n print(\"run_iso_forest_test Done. Elapsed time is %.2f seconds.\" % dt)", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def apply(self, X):\n return self.forest.apply(X)", "def optimize_rf(data, targets):\r\n def rfc_crossval(n_estimators, min_samples_split, max_features,max_depth,loss_n):\r\n\r\n return rfc_cv(\r\n n_estimators=int(n_estimators),\r\n min_samples_split=int(min_samples_split),\r\n max_features=int(round(max_features)),\r\n max_depth=int(max_depth),\r\n loss_n=loss_n,\r\n data=data,\r\n targets=targets,\r\n )\r\n\r\n optimizer = BayesianOptimization(\r\n f=rfc_crossval,\r\n pbounds={\r\n \"n_estimators\": (10, 5000),\r\n \"min_samples_split\": (2, 25),\r\n \"max_features\": (1, 8),\r\n \"max_depth\":(2,10),\r\n 'loss_n': (0, 1)\r\n },\r\n random_state=1234,\r\n verbose=2\r\n )\r\n logger=JSONLogger(path=\"./randomlogs.json\")\r\n optimizer.subscribe(Events.OPTMIZATION_STEP,logger)\r\n optimizer.maximize(init_points=50,n_iter=300)\r\n with open('./randomlogs.json','a',encoding='utf-8')as f:\r\n f.write(str(optimizer.max))", "def main(args):\n if args.train_test_split < 0.2 or args.train_test_split > 0.8:\n print(\"Bad value for train_test_split, range is 0.2 - 0.8\")\n sys.exit()\n\n dataset = pd.read_csv(args.train_file)\n\n x_data = dataset.loc[:, (dataset.columns != args.classification_column) \\\n & (dataset.columns != \"Survey_id\")]\n y_data = dataset[args.classification_column].to_numpy()\n dataset_headers = list(x_data.columns)\n x_data = x_data.fillna(0).to_numpy()\n\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, \\\n test_size=args.train_test_split)\n\n\n dtc = DecisionTreeClassifier(max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n dtc = dtc.fit(x_train, y_train)\n dtc_score = dtc.score(x_test, y_test)\n\n\n export_graphviz(dtc, out_file=\"decision_tree.dot\", feature_names=dataset_headers, \\\n rounded=True, precision=1, filled=True)\n os.system(\"dot -Tpng decision_tree.dot -o decision_tree.png\")\n\n\n rfc = RandomForestClassifier(n_estimators=args.estimators, max_depth=args.max_depth, \\\n min_impurity_split=args.acceptable_impurity)\n rfc.fit(x_train, y_train)\n rfc_score = rfc.score(x_test, y_test)\n\n file = open('result.txt', 'w')\n file.write(f'Decisions tree score = {dtc_score}\\n')\n file.write(f'Random forest score = {rfc_score}\\n')\n file.close()", "def grbefgs(self):\n print('Performing GrBeFGS\\n')\n\n frontier = PriorityFrontier()\n\n initial_heuristic = self.get_heuristic(self.initial_state)\n initial_node = SearchNode(self.initial_state)\n frontier.insert(initial_node, initial_heuristic)\n\n visited_nodes = set()\n \n while True:\n if frontier.is_empty():\n # Search failure\n return GenericResult(failure=True)\n \n # Get the next leaf node from the frontier\n leaf_node = frontier.pop()\n \n # Add this node to the visited nodes set\n visited_nodes.add(leaf_node)\n \n # Check for the goal state\n if self.check_goal_state(leaf_node.state):\n # Search success\n # Return final state and list of actions along path to the goal\n # as part of the GenericResult class solution member\n return 
GenericResult(solution=Solution(final_state=leaf_node.state, actions=self.get_action_path(leaf_node)))\n \n # Generate all possible actions for the given state\n actions = self.get_actions(leaf_node.state)\n \n # Create search nodes from the generated actions\n for action in actions:\n # Generate a new state from the given action\n new_state = self.get_result(leaf_node.state, action)\n \n # Get the new state's heuristic\n new_heuristic = self.get_heuristic(new_state)\n\n # Create a new search node with the created state\n new_node = SearchNode(new_state, leaf_node, action)\n \n # If this node has already been visited, ignore it\n if new_node in visited_nodes:\n continue\n\n # Check for any nodes with the same state as new_state and with better h values that \n # have yet to be visited in the frontier before adding new_node\n if new_node in frontier:\n frontier_node = frontier.peek_node(new_node)\n frontier_heuristic = self.get_heuristic(frontier_node.state)\n\n if frontier_heuristic <= new_heuristic:\n # The original heuristic was less than or equal to the new node\n # Disregard the new node\n continue\n \n else:\n # The new node's heuristic is larger\n # Remove the original node from the frontier\n frontier.remove_node(frontier_node)\n \n # Add the new node to the frontier\n frontier.insert(new_node, new_heuristic)", "def fit(self, X, y, dview = None):\n\t\t#Get classes\n\t\tclasses, y[:] = numpy.unique(y[:], return_inverse=True)\n\t\tself.classes_ = classes\n\t\tself.n_classes_ = classes.shape[0]\n\t\tforests = []\n\n\t\tfeatureFunction = self.featureFunction\n\t\tfor i in range(self.n_forests):\n\t\t\tprint(\"forest : \",i+1,\" / \",self.n_forests)\n\t\t\tif (i != 0):\n\t\t\t\tif(self.specialisation == 'global'):\n\t\t\t\t\tacc = forest.getFeatureImportance()\n\t\t\t\t\tfeatureFunction.random_weight = acc\n\t\t\t\telif(self.specialisation =='per_class'):\n\t\t\t\t\tacc_per_class = forest.getFeatureImportanceByClass()\n\t\t\t\t\tfeatureFunction.random_weight_per_class = acc_per_class\n\n\t\t\tforest = deepcopy(self.forest)\n\t\t\tforest.featureFunction = featureFunction\n\t\t\tforest.fit(X, y, dview)\n\t\t\tforests.append(forest)\n\n\t\t# Collect newly grown Forests\n\t\tself.forests_.extend(forests)", "def forest_model(params):\n if (params['random']):\n params['n_estimators'] = random.choice([1, 3, 5, 10, 20, 30, 40, 50, 75, 100])\n model = ExtraTreesClassifier(\n n_estimators=params['n_estimators'],\n random_state=0\n )\n\n return model", "def __init__(self, forest):\n self.forest = forest", "def build_random_forest(X_train, y_train):", "def run_rfc():\n num_folds = 5\n with pd.HDFStore('./OT_clr_train_LGG_grade.h5') as store:\n X = store['expression'].values\n Y = store['labels'].values\n\n # standardize expression\n mu = np.mean(X,axis=0)\n std = np.std(X, axis=0)\n X = (X-mu)/std\n\n # define Predictor object to manage nested CV\n rf_predictor = Predictor(\n CVmodel(RandomForestClassifier_skl,[4,8,16,32,64,128], 'max_depth',\n n_estimators=100, n_jobs=-1),\n scorers.accuracy_scorer)\n # cross validate\n rf_cross_validation_scores = \\\n rf_predictor.cross_validate(X, Y,\n outer_folds=num_folds, inner_folds=num_folds)\n logger.info('Random Forest cross-validation = {0:.3f}'.format(\n np.mean(rf_cross_validation_scores)))", "def train_random_forest():\n train_model(RandomForestRegressor(max_depth=4, random_state=42),\n dataset_file_name=RANDOM_FOREST_DEFAULT_DATASET,\n model_file_name=RANDOM_FOREST_DEFAULT_MODEL)", "def main():\n create_sets()\n optimal_weights = 
genetic_algorithm()\n obtain_best_model(optimal_weights)", "def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, \n max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,\n verbose=0, min_density=None, compute_importances=None): \n self.random_forest = RandomForestClassifier(n_trees, criterion, max_depth, min_samples_split, min_samples_leaf, \n max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,\n verbose, min_density, compute_importances)", "def fit_image(self, array_data, sample_index, y, dview = None):\n\n\t\t#Get classes\n\t\tclasses, y[:] = numpy.unique(y[:], return_inverse=True)\n\t\tself.classes_ = classes\n\t\tself.n_classes_ = classes.shape[0]\n\n\t\tforests = []\n\t\tfeatureFunction = self.featureFunction\n\t\tSWx,SWy = featureFunction.width, featureFunction.height\n\n\t\tif(self.use_geodesic):\n\t\t\tpan = array_data[0]\n\t\t\tself.geodesic_cost = nd.gaussian_gradient_magnitude(pan, self.geodesic_sigma)\n\n\t\tif(self.n_steps_simple is None and self.n_steps_proba is None):\n\t\t\tfor i in range(self.n_forests):\n\t\t\t\tprint(\"forest : \",i+1,\" / \",self.n_forests)\n\t\t\t\tif (i != 0):\n\t\t\t\t\tif(self.specialisation == 'global'):\n\t\t\t\t\t\tacc = forest.getFeatureImportance()\n\t\t\t\t\t\tfeatureFunction.random_weight = acc\n\t\t\t\t\telif(self.specialisation =='per_class'):\n\t\t\t\t\t\tacc_per_class = forest.getFeatureImportanceByClass()\n\t\t\t\t\t\tfeatureFunction.random_weight_per_class = acc_per_class\n\t\t\t\t\tif(self.add_previous_prob):\n\t\t\t\t\t\tproba = forest.predict_proba_image(array_data,SWx,SWy)\n\t\t\t\t\t\tif(self.use_geodesic):\n\t\t\t\t\t\t\tproba = self.geodesic(proba)\n\t\t\t\t\t\tarray_data = numpy.concatenate((array_data,proba))\n\t\t\t\t\t\tfeatureFunction.nb_channels = array_data.shape[0]\n\n\t\t\t\tforest = deepcopy(self.forest)\n\t\t\t\tforest.featureFunction = featureFunction\n\t\t\t\tforest.fit_image(array_data, sample_index, y, dview)\n\t\t\t\tforests.append(forest)\n\t\telse:\n\t\t\tn_forests = 0\n\t\t\tfor step_proba in range(self.n_steps_proba):\n\t\t\t\tfor step_simple in range(self.n_steps_simple):\n\t\t\t\t\tprint(\"step_proba : \" ,step_proba +1,\" / \",self.n_steps_proba)\n\t\t\t\t\tprint(\"step_simple : \",step_simple+1,\" / \",self.n_steps_simple)\n\t\t\t\t\tif (step_simple != 0):\n\t\t\t\t\t\tif(self.specialisation == 'global'):\n\t\t\t\t\t\t\tacc = forest.getFeatureImportance()\n\t\t\t\t\t\t\tfeatureFunction.random_weight = acc\n\t\t\t\t\t\telif(self.specialisation =='per_class'):\n\t\t\t\t\t\t\tacc_per_class = forest.getFeatureImportanceByClass()\n\t\t\t\t\t\t\tfeatureFunction.random_weight_per_class = acc_per_class\n\t\t\t\t\t\t#if specialisation\n\t\t\t\t\telse :\n\t\t\t\t\t\tfeatureFunction.random_weight = None\n\t\t\t\t\t\tfeatureFunction.random_weight_per_class = None\n\t\t\t\t\t#if step_simple !=0\n\t\t\t\t\tif (step_proba != 0) and (step_simple == 0):\n\t\t\t\t\t\tproba = forest.predict_proba_image(array_data,SWx,SWy)\n\t\t\t\t\t\tif(self.use_geodesic):\n\t\t\t\t\t\t\tproba = self.geodesic(proba)\n\t\t\t\t\t\t#if use_geodesic\n\t\t\t\t\t\tarray_data = numpy.concatenate((array_data,proba))\n\t\t\t\t\t\tfeatureFunction.nb_channels = array_data.shape[0]\n\t\t\t\t\t#if (step_proba != 0) and (step_simple=0):\n\t\t\t\t\tforest = deepcopy(self.forest)\n\t\t\t\t\tforest.featureFunction = featureFunction\n\t\t\t\t\tforest.fit_image(array_data, sample_index, y, 
dview)\n\t\t\t\t\tforests.append(forest)\n\t\t\t\t\tn_forests +=1\n\t\t\t\t#for step_simple\n\t\t\t#for step_proba\n\t\t\tself.n_forests = n_forests\n\n\t\t# Collect newly grown Forests\n\t\tself.forests_.extend(forests)", "def run_rbfopt(self):\n folder_name = os.path.join(self.package_directory, '../data/optimization_comparisons/rbfopt')\n self.mug_pipeline.set_folder_names(folder_name)\n self.mug_pipeline.set_optimizer_type(OptimizerType.RBFOPT)\n\n # file_q = manager.Queue()\n # filename = '{}/results.csv'.format(folder_name)\n # watcher = Process(target=self.listener, args=(file_q, filename))\n # watcher.start()\n\n while True:\n try:\n print('starting RBFOpt', flush=True)\n bb = rbfopt.RbfoptUserBlackBox(\n self.num_vars, \n self.mug_lower_bounds, self.mug_upper_bounds,\n np.array(['R'] * self.num_vars), self.mug_pipeline.run_inference)\n settings = rbfopt.RbfoptSettings(max_evaluations=self.max_iterations)\n alg = rbfopt.RbfoptAlgorithm(settings, bb)\n objval, x, itercount, evalcount, fast_evalcount = alg.optimize()\n state_path = os.path.join(folder_name, 'state.dat')\n # print(state_path)\n alg.save_to_file(state_path)\n except FoundCounterexample:\n # restart\n pass", "def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()", "def random_forest_regression(dataset,\n model=saved_pickle_model,\n fit=False):\n\n\n # Preparing the training and test sets\n # ------------------------------------\n # Exoplanet and Solar system dataset\n dataset_exo = dataset[:501]\n dataset_sol = dataset[501:]\n\n # Separating the data into dependent and independent variables\n features = dataset_exo.iloc[:, :-1] # mass, teq, etc\n labels = dataset_exo.iloc[:, -1] # radius\n\n # Splitting the dataset into the Training set and Test set\n X_train, X_test, y_train, y_test = train_test_split(features,\n labels,\n test_size=0.25,\n random_state=0)\n features_sol = dataset_sol.iloc[:, :-1]\n labels_sol = dataset_sol.iloc[:, -1]\n\n X_train_sol, X_test_sol, y_train_sol, y_test_sol = train_test_split(features_sol,\n labels_sol,\n test_size=0.25,\n random_state=0)\n\n X_train = X_train.append(X_train_sol)\n y_train = y_train.append(y_train_sol)\n X_test = X_test.append(X_test_sol)\n y_test = y_test.append(y_test_sol)\n\n # Outliers in the sample\n # Remove HATS-12 b from the training set\n X_test = X_test.drop(['HATS-12 b'])\n y_test = y_test.drop(labels=['HATS-12 b'])\n print('\\nHATS-12 b removes from test set\\n')\n\n # Remove K2-95 b from the training set\n X_train = X_train.drop(['K2-95 b'])\n y_train = y_train.drop(labels=['K2-95 b'])\n print('\\nK2-95 b removes from training set\\n')\n\n # Remove Kepler-11 g from the training set\n X_train = X_train.drop(['Kepler-11 g'])\n y_train = y_train.drop(labels=['Kepler-11 g'])\n print('\\nKepler-11 g removes from training set\\n')\n\n train_test_values = [X_train.values, X_test.values,\n y_train.values, y_test.values]\n train_test_sets = [X_train, X_test, y_train, y_test]\n\n # Fitting the hyperparameters of the random forest model\n # with the grid search method\n # ------------------------------------------------------\n if fit:\n # Setting up 
the grid of hyperparameters\n rf = GridSearchCV(RandomForestRegressor(),\n param_grid={'n_estimators': np.arange(80, 200),\n 'max_depth': np.arange(4, 10),\n 'max_features': np.arange(3, 6),\n 'min_samples_split': np.arange(4, 5)},\n cv=3, verbose=1, n_jobs=-1)\n\n # Fitting training set - finding best hyperparameters\n rf.fit(X_train, y_train)\n\n # Best hyperparameters found by the grid search\n print(rf.best_params_)\n\n # Random forest model with the best hyperparameters\n regr = RandomForestRegressor(n_estimators=rf.best_params_['n_estimators'],\n max_depth=rf.best_params_['max_depth'],\n max_features=rf.best_params_['max_features'],\n min_samples_split=rf.best_params_['min_samples_split'],\n random_state=0, oob_score=True)\n\n # Saving the random forest model in a file\n outdir = 'bem_output'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n name_Rf = 'r2_' + str(round(rf.best_score_, 2)) + '_' + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")) + '.pkl'\n name_Rf = os.path.join(outdir, name_Rf)\n\n joblib.dump(regr, name_Rf)\n print('RF model save in : ', name_Rf)\n\n else:\n # Loading the random forest model saved\n print('Loading random forest model: ', model)\n regr = joblib.load(model)\n\n # Fit the best random forest model to the training set\n # ----------------------------------------------------\n regr.fit(X_train, y_train)\n\n # Predict the radius for the training and testing sets\n y_train_predict = regr.predict(X_train)\n y_test_predict = regr.predict(X_test)\n\n # Scores of the random forest\n test_score = r2_score(y_test, y_test_predict)\n pearson = pearsonr(y_test, y_test_predict)\n print(f'Test set, R-2 score: {test_score:>5.3}')\n print(f'\\nTest set, Pearson correlation: {pearson[0]:.3}')\n\n # Mean squared errors of the train and test set\n print('Root mean squared errors')\n print('Train set: ', np.sqrt(np.mean((y_train-y_train_predict)**2)),\n '\\nTest set: ', np.sqrt(np.mean((y_test-y_test_predict)**2)))\n\n # Feature importance\n name_features = dataset.columns.tolist()\n print('\\nFeature importance')\n _ = [print(name, ': \\t', value)\n for name, value\n in zip(name_features, regr.feature_importances_)]\n\n return regr, y_test_predict, train_test_values, train_test_sets", "def cli(train_prefix, part_rangestart, # pylint: disable=too-many-branches, too-many-locals, too-many-statements, too-many-arguments\n finalize=False, debug_run=False):\n rangestart = part_rangestart\n pref = 'forest'\n pref += '_' + str(part_rangestart)\n if finalize:\n pref += '_final'\n if debug_run:\n pref += '_debug'\n out_fp = path.join(OUT_DIR, pref + '.z')\n LOGGER.info(\"Running for configuration `%s`.\", out_fp)\n LOGGER.info(\"Loading data...\")\n train_dta, train_annot, val_dta, val_annot = get_data( # pylint: disable=unused-variable\n train_prefix, part_rangestart, finalize, debug_run)\n # Checks.\n if rangestart > 0 and rangestart < 82:\n # Rotation matrices.\n assert train_annot.max() <= 1.\n assert train_annot.min() >= -1.\n assert val_annot.max() <= 1.\n assert val_annot.min() >= -1.\n import sklearn.ensemble\n rf = sklearn.ensemble.RandomForestRegressor(n_jobs=available_cpu_count())\n LOGGER.info(\"Fitting...\")\n rf.fit(train_dta, train_annot)\n LOGGER.info(\"Writing results...\")\n joblib.dump(rf, out_fp, compress=True)\n LOGGER.info(\"Done.\")", "def optimize(params, x, y):\n\n # initialize model with current parameters\n model = ensemble.RandomForestClassifier(**params)\n\n # initialize stratified k fold\n kf = 
model_selection.StratifiedKFold(n_splits=5)\n\n # initialize accuracy list\n accuracies = []\n\n # loop over all folds\n for idx in kf.split(X=x, y=y):\n train_idx, test_idx = idx[0], idx[1]\n xtrain = x[train_idx]\n ytrain = y[train_idx]\n\n xtest = x[test_idx]\n ytest = y[test_idx]\n\n # fit model on train data\n model.fit(xtrain, ytrain)\n\n # create predictions\n preds = model.predict(xtest)\n\n # calculate and append accuracy\n accuracy = metrics.accuracy_score(ytest, preds)\n accuracies.append(accuracy)\n\n # return negative accuracy\n return -1 * np.mean(accuracies)", "def rf_tuning(n_estimators, min_samples_leaf, min_samples_split, max_leaf_nodes, max_features, max_depth, train_x,\n test_x, train_y, test_y):\n rf_tuning = RandomForestClassifier(n_estimators=n_estimators, min_samples_leaf=min_samples_leaf,\n min_samples_split=min_samples_split, max_leaf_nodes=max_leaf_nodes,\n max_features=max_features, max_depth=max_depth)\n rf_tuning.fit(train_x, train_y)\n predictions = rf_tuning.predict(test_x)\n recall = recall_score(test_y, predictions, average=\"macro\")\n return recall", "def fit_tree_stump_forest(X_train: np.ndarray, y_train: np.ndarray, n_estimators: int) -> RandomForestClassifier:\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf = clf.fit(X_train, y_train)\n return clf", "def findRFBestN():\n resultList = []\n BestScore = 0\n nList = [ n for n in range(1,200) if n%10 == 0]\n for n in nList:\n rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_n = n\n resultList += [[n, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding n_estimator is: ')\n return BestScore, best_n", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def ensemble_001():\n n_centroids = 3000\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_ensemble_001',\n 
n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n X = kmeans_generator.transform(images, save_to_file='data/data_ensemble_001.npy', memmap=True)\n Y = classes.train_solutions.data\n\n # Unload some objects\n del images\n gc.collect()\n\n # Get the input for the RF so that we can split together\n sampler = SampleTransformer(training=True, steps=2, step_size=20, n_jobs=-1)\n pX = sampler.transform()\n\n # manual split of train and test\n train_x, test_x, ptrain_x, ptest_x, train_y, test_y = train_test_split(X, pX, Y, test_size=0.5)\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 500}, n_jobs=-1)\n wrapper.fit(train_x, train_y)\n kmeans_preds = wrapper.predict(test_x)\n\n pWrapper = ModelWrapper(RandomForestRegressor, {'n_estimators': 500, 'verbose': 3}, n_jobs=-1)\n pWrapper.fit(ptrain_x, train_y)\n pixel_preds = pWrapper.predict(ptest_x)\n\n logger.info('Kmeans')\n classes.colwise_rmse(kmeans_preds, test_y)\n classes.rmse(kmeans_preds, test_y)\n logger.info('Pixel RF')\n classes.colwise_rmse(pixel_preds, test_y)\n classes.rmse(pixel_preds, test_y)\n\n logger.info(\"Ensembling predictions\")\n etrain_x = np.hstack((wrapper.predict(train_x), pWrapper.predict(ptrain_x)))\n etest_x = np.hstack((kmeans_preds, pixel_preds))\n eWrapper = ModelWrapper(RandomForestRegressor, {'n_estimators': 500, 'verbose': 3}, n_jobs=-1)\n eWrapper.fit(etrain_x, train_y)\n ensemble_preds = eWrapper.predict(etest_x)\n classes.colwise_rmse(ensemble_preds, test_y)\n classes.rmse(ensemble_preds, test_y)", "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0.6, 1)\r\n colsample_bytree = trial.suggest_uniform(\"colsample_bytree\", 0.6, 1)\r\n reg_alpha = trial.suggest_uniform(\"reg_alpha\", 0.1, 100)\r\n reg_lambda = trial.suggest_uniform(\"reg_lambda\", 0.1, 100)\r\n\r\n model = LGBMRegressor(\r\n num_leaves=num_leaves,\r\n min_child_samples=min_child_samples,\r\n min_child_weight=min_child_weight,\r\n subsample=subsample,\r\n colsample_bytree=colsample_bytree,\r\n reg_alpha=reg_alpha,\r\n reg_lambda=reg_lambda,\r\n )\r\n\r\n model = ModelTrainer(file_object=self.file_object).get_trained_model(\r\n model, self.X_train, self.y_train\r\n )\r\n r_squared, rmse = ModelScorer(file_object=self.file_object).get_model_scores(\r\n model, self.X_test, self.y_test\r\n )\r\n\r\n return r_squared", "def train(self, dataset): \n self.random_forest.fit(dataset[:,:-1], dataset[:,-1])", "def evaluate_random_forest(y_test, y_pred):", "def run_random_forest(rf_clf, training, testing, feature_cols, outcome_col):\n X_train, X_test = training[feature_cols].values, testing[feature_cols].values\n Y_train, Y_test = training[outcome_col].values, testing[outcome_col].values\n fitted_rf_model = rf_clf.fit(X_train, Y_train)\n rf_diagnostics = get_diagnostics(testing[outcome_col], testing[feature_cols], fitted_rf_model, 'rf')\n predicted_rf_probs = [p[1] for p in fitted_rf_model.predict_proba(X_test)]\n\n return fitted_rf_model, rf_diagnostics, predicted_rf_probs", "def __call__(self, fgraph):\r\n return self.optimize(fgraph)", 
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def findRFBestDepth():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def experiment(improved):\n\n N_list = [5 ,10 ,20]\n K_list = [3 , 7 ,9]\n P_list = [0.3 , 0.4 , 0.5 ,0.6 ,0.7]\n\n data = pandas.read_csv('train.csv')\n\n avg_list = []\n for i in range(0,len(N_list) * len(K_list) * len(P_list)):\n avg_list.append([0 , None])\n\n kf = KFold(n_splits=5, shuffle=True, random_state=209418441)\n rotation_index = 1\n for train_index, test_index in kf.split(data):\n\n train = data.iloc[train_index]\n test = data.iloc[test_index]\n index = 0\n for n in N_list:\n for k in K_list:\n for p in P_list:\n\n #print('testing for N= ',n,', K = ',k, 'P = ',p)\n KNN = forest.KNN_forest(N=n, K=k, P=p, data = train , improved=improved)\n success_rate = utls.tests.succ_rate_test.test(test,KNN.Classify)\n avg_list[index][0] += success_rate\n avg_list[index][1] = (n,k,p)\n #print(' rate is: ',avg_list[index][0]/rotation_index)\n index += 1\n rotation_index +=1\n\n\n\n best_option = max(avg_list,key= lambda x:x[0])\n #print(' ****** DONE ******')\n #print('best n,k,p are : ' , best_option[1] , ' with success rate: ' , best_option[0])\n\n return best_option[1]", "def __init__(self, n_estimators=100, n_jobs=100,\n n_bins=2, min_leaf=1, max_depth=2, n_samples=1.0, max_features=\"auto\", \n bootstrap=True, random_state=42, criterion=\"auc_sub\", bias_method=\"avg\", \n compound_bias_method=\"avg\", orthogonality=.5):\n self.is_fit = False\n self.n_bins = n_bins\n self.n_jobs = n_jobs\n self.min_leaf = min_leaf\n self.max_depth = max_depth\n self.n_samples = n_samples\n self.criterion = criterion\n self.max_features = max_features\n self.bias_method = bias_method\n self.orthogonality = orthogonality\n self.bootstrap = bootstrap\n self.random_state = random_state \n self.n_estimators = n_estimators\n self.compound_bias_method = compound_bias_method\n \n\n # Generating FairRandomForest\n dts = [\n FairDecisionTreeClassifier(\n n_bins=self.n_bins,\n min_leaf=self.min_leaf,\n max_depth=self.max_depth,\n n_samples=self.n_samples,\n criterion=self.criterion,\n random_state=self.random_state+i,\n max_features=self.max_features,\n 
bias_method=self.bias_method,\n orthogonality=self.orthogonality,\n bootstrap=self.bootstrap,\n compound_bias_method=self.compound_bias_method,\n )\n for i in range(self.n_estimators)\n ]\n self.trees = dts", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def train(\n cls, params: Dict[str, Any], ematrix: EMatrix, num_boost_round: int = 10\n ) -> \"EBooster\":\n start_params = {\n \"max_depth\": 5,\n \"learning_rate\": 0.3,\n \"splitgax\": False,\n \"transposed_feature\": False,\n \"progress_callback\": None,\n }\n start_params.update(params)\n\n reduce_axis = 1 if start_params[\"transposed_feature\"] else 0\n use_extra = ematrix.extra_features is not None\n\n if start_params[\"splitgax\"] and ematrix.gax is None:\n ematrix.gax = make_gax(ematrix.features, axis=reduce_axis)\n\n forest = []\n bias = np.zeros(ematrix.label.shape)\n features = ematrix.features\n for r in range(num_boost_round):\n print(f\"\\n{r} round\", file=sys.stderr)\n tree = build_tree(\n start_params,\n EMatrix(\n features=ematrix.features,\n label=ematrix.label,\n bias=bias,\n extra_features=ematrix.extra_features,\n gax=ematrix.gax,\n splitgax=start_params[\"splitgax\"],\n ),\n # split_maker=split_maker,\n transposed_feature=start_params[\"transposed_feature\"],\n unbalanced_penalty=start_params[\"unbalanced_penalty\"],\n reduce_axis=reduce_axis,\n use_extra=use_extra,\n )\n # print(\"tree ok, bias shape = {}\".format(bias.shape), file=sys.stderr)\n tree_arrays = init_arrays(\n root=tree,\n n=init_id(tree),\n weights_num=ematrix.extra_features.shape[1 - reduce_axis]\n if ematrix.extra_features is not None\n else 1,\n )\n bias_delta = tree_apply(\n tree_arrays=tree_arrays,\n features=features,\n extra_features=ematrix.extra_features,\n reduce_axis=reduce_axis,\n )\n # print(\"apply ok, bias delta shape = {}\".format(bias_delta.shape), file=sys.stderr)\n bias = bias + np.reshape(bias_delta, newshape=bias.shape)\n forest.append((tree, tree_arrays))\n # print(\"forest appended\", file=sys.stderr)\n if start_params[\"progress_callback\"] is not None:\n start_params[\"progress_callback\"](r, num_boost_round)\n\n return cls(forest)", "def Random_Forest(df, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn.ensemble import RandomForestRegressor\n # Define input\n X = df.drop(['target'], axis=1)\n # Set validation\n y = df['target']\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n rf = RandomForestRegressor(n_estimators= 250, min_samples_split= 3, min_samples_leaf= 2, max_features= 'sqrt',max_depth= 30,bootstrap=True ,random_state=10,verbose=True)\n clf = rf.fit(X_train, y_train)\n print('Linear Regression RMSE',compute_rmse(y_test, 
clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def train(self, df, feature, max_range, extra=False, defender=False):\n\n df2 = self._train_preprocess(df, feature, extra)\n\n # No need for names anymore\n if defender:\n df2 = df2.drop([\"Player Id\"], axis=1)\n\n # Instantiate the models\n self.rfrg = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=69420)\n\n if not defender:\n self.gbrg = LGBMRegressor(n_estimators=1000, learning_rate=0.01)\n\n # Then, perform regression -> This is to see how it performs over weeks\n mean_error1 = []\n mean_error2 = []\n\n for week in range(max_range - 5, max_range):\n train = df2[df2['week'] < week]\n val = df2[df2['week'] == week]\n\n x_train, x_test = train.drop([feature], axis=1), val.drop([feature], axis=1)\n y_train, y_test = train[feature].values, val[feature].values\n\n self.rfrg.fit(x_train, y_train)\n preds1 = self.rfrg.predict(x_test)\n error1 = rmsle(y_test, preds1)\n print('Week %d - Error for Random Forest %.5f' % (week, error1))\n\n mean_error1.append(error1)\n if not defender:\n self.gbrg.fit(x_train, np.log1p(y_train))\n preds2 = np.expm1(self.gbrg.predict(x_test))\n error2 = rmsle(y_test, preds2)\n print('Week %d - Error for Gradient Boosting %.5f' % (week, error2))\n mean_error2.append(error2)\n\n print()\n print()\n print(\"Feature statistics:\")\n print(f\"Min value for feature {feature}: {df[feature].min()}\")\n print(f\"Max value for feature {feature}: {df[feature].max()}\")\n print(f\"Mean value for feature {feature}: {df[feature].mean()}\")\n print(f\"Standard deviation for feature {feature}: {df[feature].std()}\")\n print()\n print(\"Results\")\n print('Mean Error for Random Forest = %.5f' % np.mean(mean_error1))\n\n # Note: the final model is trained on every week and stored in self.model!\n final_xtrain = df2.drop([feature], axis=1)\n final_ytrain = df2[feature].values\n self.rfrg.fit(final_xtrain, final_ytrain)\n\n if not defender:\n print('Mean Error for Gradient Boosting = %.5f' % np.mean(mean_error2))\n self.gbrg.fit(final_xtrain, np.log1p(final_ytrain))", "def random_forest_001(outfile=\"sub_random_forest_001.csv\", n_jobs=1):\n model = models.RandomForest.RandomForestModel(n_jobs=n_jobs)\n model.run('train')\n predictions = model.run('predict')\n output = classes.Submission(predictions)\n output.to_file(outfile)", "def findBestModel(X_train, X_test, Y_test, model='iForest'):\n if model == 'iForest':\n total_score = 0;\n parameters = [0,0,0,0]\n for max_features in range(1,X_train.shape[1]+1):\n for contamination in range(1,101):\n iForest = IsolationForest(n_estimators = 100, max_features = max_features, contamination = contamination/1000, random_state = 0).fit(X_train)\n \n scores = []\n for x_test,y_test in zip(X_test,Y_test):\n y_hat = iForest.predict(x_test)\n score = evaluate(y_test,y_hat) # returns similarity percentage\n scores.append(score)\n \n if sum(scores) > total_score:\n total_score = sum(scores)\n parameters[0] = max_features\n parameters[1] = contamination/1000\n parameters[2] = total_score\n parameters[3] = scores\n print(parameters, contamination)\n \n return parameters", "def random_forest_002(outfile=\"sub_random_forest_002.csv\", n_jobs=4):\n mdl = models.RandomForest.RandomForestMoreFeatures(n_jobs=n_jobs, cv_sample=0.1)\n mdl.run('cv')", "def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, 
KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))", "def test_uniform_search_produces_forest(graph):\n g = graph()\n \n if hasattr(g, 'edge_weight'):\n edge_weight = g.edge_weight\n else:\n edge_weight = defaultdict(int)\n\n # Create a visitor that will produce a forest\n class ForestVisitor(TraversalVisitor):\n def __init__(self):\n TraversalVisitor.__init__(self)\n self.forest = yaupon.Forest()\n \n def tree_edge(self, e):\n # This will throw from inside \"traverse\" if a cycle is created\n self.forest.add_edge(e[0],e[1])\n\n forest_visitor = ForestVisitor()\n traverse(g.vertices(), forest_visitor, \n uniform_cost_generator(g, edge_weight))", "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"md\",\n type=int,\n help=\"maximum depth\")\n parser.add_argument(\"mls\",\n type=int,\n help=\"minimum leaf samples\")\n parser.add_argument(\"--xTrain\",\n default=\"q4xTrain.csv\",\n help=\"filename for features of the training data\")\n parser.add_argument(\"--yTrain\",\n default=\"q4yTrain.csv\",\n help=\"filename for labels associated with training data\")\n parser.add_argument(\"--xTest\",\n default=\"q4xTest.csv\",\n help=\"filename for features of the test data\")\n parser.add_argument(\"--yTest\",\n default=\"q4yTest.csv\",\n help=\"filename for labels associated with the test data\")\n\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.xTrain)\n yTrain = pd.read_csv(args.yTrain)\n xTest = pd.read_csv(args.xTest)\n yTest = pd.read_csv(args.yTest)\n # create an instance of the decision tree using gini\n start = time.time()\n dt1 = DecisionTree('gini', args.md, args.mls)\n trainAcc1, testAcc1 = dt_train_test(dt1, xTrain, yTrain, xTest, yTest)\n print(\"GINI Criterion ---------------\")\n print(\"Training Acc:\", trainAcc1)\n print(\"Test Acc:\", testAcc1)\n dt = DecisionTree('entropy', args.md, args.mls)\n trainAcc, testAcc = dt_train_test(dt, xTrain, yTrain, xTest, yTest)\n print(\"Entropy Criterion ---------------\")\n print(\"Training Acc:\", trainAcc)\n print(\"Test Acc:\", testAcc)\n end = time.time()\n print(\"Time taken: \", end-start)", "def random_forest(path_m1a, path_non_m1a, repetitions, splits, trees, outfile):\n\n # Path to the output file comprised of a 1:1 ratio of m1A and non-m1A\n m1a_list = fill_list(path_m1a)\n 
non_m1a_list = fill_list(path_non_m1a)\n\n predictor_number = []\n for predic in predictors_in_use:\n predictor_number.append(predic)\n\n predictor_string = []\n for j in range(len(predictors_in_use)):\n if predictors_in_use[j] != 'pre_base':\n predictor_string.append(predictors_in_use[j])\n if pre_base:\n predictor_string.extend(['A', 'C', 'G', 'T'])\n predictor_number.extend(['A', 'C', 'G', 'T'])\n mean_feature_importance = [0] * (len(predictor_number) - 1)\n else:\n mean_feature_importance = [0] * len(predictor_number)\n\n # List for mean scores\n mean_sensitivity, mean_specificity, mean_ppv, mean_npv, mean_roc_auc, mean_mcc = [], [], [], [], [], []\n\n outfile.write('AUC' + '\\t' + 'Sensitivity' + '\\t' + 'Specificity' + '\\t' + 'PPV' + '\\t' + 'NPV' + '\\t' +\n 'MCC' + '\\t')\n\n predictors_in_use.append('mod_type')\n\n for j in range(repetitions):\n random.shuffle(m1a_list)\n random.shuffle(non_m1a_list)\n\n # Write equal numbers of m1As and non-m1As into a file\n temp_list = []\n for i in range(len(m1a_list)):\n temp_list.append(m1a_list[i].strip().split())\n temp_list.append(non_m1a_list[i].strip().split())\n\n # Build data pandas frame using all columns from the input file\n df = pd.DataFrame.from_records(temp_list, columns=predictor_features)\n # Remove columns that are not used\n for column in df.columns:\n if column not in predictors_in_use:\n df.drop(column, 1, inplace=True)\n\n # Change the modification type to numerical value\n df['mod_type'] = df['mod_type'].map({temp_list[0][-1]: 1, temp_list[1][-1]: 0})\n\n # Get categorical values (pre_base). This function creates 4 more columns in the pandas data frame (A, C, G, T).\n # Column 'pre_base' will be removed\n if pre_base:\n one_hot = pd.get_dummies(df['pre_base'])\n df.drop('pre_base', 1, inplace=True)\n df = df.join(one_hot)\n\n df_clean = df.dropna()\n df_clean.describe()\n\n # Use all values except for 'mod_type' as predictors\n predictors = df_clean[predictor_string]\n predictors = predictors.as_matrix()\n\n targets = df_clean.mod_type\n\n skf = StratifiedKFold(n_splits=splits, shuffle=True, random_state=None)\n forest = RandomForestClassifier(n_estimators=trees, criterion='gini', max_depth=None, max_features='sqrt',\n n_jobs=-1, warm_start=True, oob_score=True, random_state=None)\n\n splits_mean_roc, splits_sensitivity, splits_specificity, splits_ppv, splits_npv, splits_mcc = 0, 0, 0, 0, 0, 0\n\n\tif pre_base:\n temp_feature_importance = [0] * (len(predictor_number) - 1)\n else:\n temp_feature_importance = [0] * len(predictor_number)\n\t\n\t# Random forest training + testing\n for train, test in skf.split(predictors, targets):\n x_train, x_test = predictors[train], predictors[test]\n y_train, y_test = targets[train], targets[test]\n\n forest.fit(x_train, y_train)\n test_prediction = forest.predict(x_test)\n\n false_pos, true_pos, _ = roc_curve(y_test, test_prediction)\n roc_auc = auc(false_pos, true_pos)\n splits_mean_roc = splits_mean_roc + roc_auc * 100\n for k in range(len(forest.feature_importances_)):\n temp_feature_importance[k] = temp_feature_importance[k] + forest.feature_importances_[k]\n\n false_pos, true_pos, _ = roc_curve(y_test, test_prediction)\n\n # Build confusion matrix and calculate relevant values for statistical analysis\n cm = pd.crosstab(y_test, test_prediction, rownames=['Actual Class'], colnames=['Predicted Class'])\n TN = cm[0][0]\n FP = cm[0][1]\n FN = cm[1][0]\n TP = cm[1][1]\n sensitivity = (TP / (TP + FN)) * 100\n specificity = (TN / (FP + TN)) * 100\n ppv = (TP / (TP + FP)) * 100\n npv = 
(TN / (TN + FN)) * 100\n mcc = ((TP * TN - FP * FN) / (sqrt((TP + FP)*(TP + FN)*(TN + FP)*(TN + FN)))) * 100\n\n splits_sensitivity = splits_sensitivity + sensitivity\n splits_specificity = splits_specificity + specificity\n splits_ppv = splits_ppv + ppv\n splits_npv = splits_npv + npv\n splits_mcc = splits_mcc + mcc\n\n # Calculate the averages of n splits\n mean_sensitivity.append(splits_sensitivity / skf.n_splits)\n mean_specificity.append(splits_specificity / skf.n_splits)\n mean_ppv.append(splits_ppv / skf.n_splits)\n mean_npv.append(splits_npv / skf.n_splits)\n mean_mcc.append(splits_mcc / skf.n_splits)\n mean_roc_auc.append(splits_mean_roc / skf.n_splits)\n for l in range(len(temp_feature_importance)):\n mean_feature_importance[l] = mean_feature_importance[l] + temp_feature_importance[l] / skf.n_splits\n\n # Calculate the overall averages of x repetitions\n print('Sensitivity: ', sum(mean_sensitivity) / repetitions)\n print('specificity: ', sum(mean_specificity) / repetitions)\n print('Positive predicted value (PPV): ', sum(mean_ppv) / repetitions)\n print('Negative predicted value (NPV): ', sum(mean_npv) / repetitions)\n print('MCC: ', sum(mean_mcc) / repetitions)\n print('AUC: ', sum(mean_roc_auc) / repetitions)\n\n outfile.write(str((sum(mean_sensitivity) / repetitions)) + '\\t' + str((sum(mean_specificity) / repetitions)) +\n '\\t' + str((sum(mean_ppv) / repetitions)) + '\\t' + str((sum(mean_npv) / repetitions)) + '\\t' +\n str((sum(mean_mcc) / repetitions)) + '\\t' + str((sum(mean_roc_auc) / repetitions)) + '\\t')\n for j in range(len(mean_feature_importance)):\n outfile.write(str(mean_feature_importance[j] / repetitions) + '\\t')\n outfile.write('\\n')\n \n\n with open(sys.argv[4], 'wb') as f:\n\tpickle.dump(forest, f)", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def main():\n options = get_options()\n dataset, test, fs = get_dataset(options)\n\n def eval_all(folder):\n \"\"\"evaluates all optimizers and all models on given dataset, and saves\n info pictures to folder\n\n Args:\n folder: folder to save results\n \"\"\"\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 
120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)\n\n def eval_complicated(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta,\n tf.keras.optimizers.Adagrad,\n tf.keras.optimizers.Adam,\n tf.keras.optimizers.Adamax,\n tf.keras.optimizers.Ftrl,\n tf.keras.optimizers.Nadam,\n tf.keras.optimizers.RMSprop,\n tf.keras.optimizers.SGD,\n ]\n\n type_eph_lrate = [\n (models.Deep2Hidden, 15, 0.00003),\n (models.Deep11Hidden, 15, 0.00003)\n ]\n\n for opt in optimizers:\n for model, epochs, lrate in type_eph_lrate:\n eval_optimizer(folder,\n model,\n opt(learning_rate=lrate),\n epochs,\n True)\n\n def eval_big(folder):\n optimizers_filter = [\n (tf.keras.optimizers.Adadelta(learning_rate=1e-3), 200),\n (tf.keras.optimizers.Adagrad(learning_rate=1e-3), 200),\n (tf.keras.optimizers.SGD(learning_rate=1e-3), 200)\n ]\n optimizers_layer = [\n (tf.keras.optimizers.Adam(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Adamax(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Ftrl(learning_rate=1e-5), 30),\n (tf.keras.optimizers.Nadam(learning_rate=1e-5), 30),\n (tf.keras.optimizers.RMSprop(learning_rate=1e-5), 30)\n ]\n optimizers_deep = [\n (tf.keras.optimizers.Adam(learning_rate=3e-6), 15),\n (tf.keras.optimizers.Adamax(learning_rate=3e-6), 15),\n (tf.keras.optimizers.RMSprop(learning_rate=3e-6), 15)\n ]\n mdls = [\n models.DefaultModel,\n models.NeuralModel,\n models.Deep1Hidden\n ]\n\n for (opt, model) in zip((optimizers_filter,\n optimizers_layer,\n optimizers_deep), mdls):\n for (optimizer, epochs) in opt:\n randomize = False if model is models.DefaultModel else True\n eval_optimizer(folder,\n model,\n optimizer,\n epochs,\n randomize)\n\n def eval_optimizer(folder,\n model, optimizer, epochs, randomize):\n \"\"\"Evaluates given model on given dataset\n\n Evaluates model on given dataset, optimizes result by optimizer, and saves\n info image to given folder\n\n Args:\n folder: folder to save info images\n model: tf.keras.Model model for evaluation\n optimizer: tf.keras optimizer\n epochs (int): epochs of training\n randomize (bool): tandomize initial weights and biases\n\n \"\"\"\n class2name = {\n models.DefaultModel: \"default\",\n models.BiasedModel: \"biased\",\n models.NeuralModel: \"neural\",\n models.NeuralSTD: \"neuralSTD\",\n models.Deep1Hidden: \"deep1h\",\n models.Deep2Hidden: \"deep2h\",\n models.Deep11Hidden: \"deep1_1\"\n }\n\n # prepare for training\n layer_len = len(dataset.take(1).as_numpy_iterator().next()[0][0])\n optimizer_conf = optimizer.get_config()\n fname = \"/%s_%s_%deph_%.5flrate_%s\" % \\\n (class2name[model],\n optimizer_conf[\"name\"],\n epochs,\n optimizer_conf[\"learning_rate\"],\n \"rnd\" if randomize else \"nornd\")\n\n pic_name = folder + fname + \".png\"\n file_name = folder + \"/models\" + fname + \".model\"\n model_obj = model(layer_len, randomize)\n model_obj.compile(optimizer=optimizer, loss=models.SimpleLoss())\n\n # prepare data from test dataset for result visualization\n train_sample = None\n no_train_sample = None\n samples = []\n labels = []\n for features, label in test.as_numpy_iterator():\n samples.append(features)\n labels.append(label)\n if train_sample is None and label == 1:\n 
train_sample = features\n if no_train_sample is None and label == 0:\n no_train_sample = features\n samples = np.array(samples)\n labels = np.array(labels, dtype=np.bool)\n # save untrained classification, for result visualization\n untrained_predicted_labels = model_obj(samples).numpy()\n # train model\n history = model_obj.fit(x=dataset, epochs=epochs)\n train_filtered = model_obj.filter_single(train_sample)\n no_train_filtered = model_obj.filter_single(no_train_sample)\n predicted_labels = model_obj(samples).numpy()\n\n # result visualization and saving\n fig = plt.figure(figsize=(15., 7.))\n loss_ax = fig.add_subplot(3, 1, 1)\n loss_ax.set_title(\"ход обучения\")\n loss_ax.set_xlabel(\"эпоха\")\n loss_ax.set_ylabel(\"ф-я потерь\")\n sig_untrained_ax = fig.add_subplot(3, 2, 3)\n sig_untrained_ax.set_title(\"примеры сигналов\")\n sig_untrained_ax.set_xlabel(\"время, сек\")\n sig_untrained_ax.set_ylabel(\"ускорение, мкм/сек^2\")\n sig_trained_ax = fig.add_subplot(3, 2, 4)\n sig_trained_ax.set_title(\"отфильтрованные сигналы\")\n sig_trained_ax.set_xlabel(\"время, сек\")\n sig_trained_ax.set_ylabel(\"ускорение, мкм/сек^2\")\n # sig_trained_ax.set_ylim(-1, 1)\n label_untrained_ax = fig.add_subplot(3, 2, 5)\n label_untrained_ax.set_title(\"классификация необученной моделью\")\n label_untrained_ax.set_xlabel(\"вероятность, что сигнал от поезда\")\n label_trained_ax = fig.add_subplot(3, 2, 6)\n label_trained_ax.set_title(\"классификация обученной моделью\")\n label_trained_ax.set_xlabel(\"вероятность, что сигнал от поезда\")\n\n loss_ax.plot(history.history[\"loss\"])\n train_ax_label, = sig_untrained_ax.plot(\n np.linspace(0, len(train_sample)/fs, len(train_sample)),\n train_sample,\n \"g\", label=\"сигнал с поездом\")\n no_train_ax_label, = sig_untrained_ax.plot(\n np.linspace(0, len(no_train_sample)/fs, len(no_train_sample)),\n no_train_sample,\n \"r\", label=\"сигнал без поезда\")\n sig_untrained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label, = sig_trained_ax.plot(\n np.linspace(0, len(train_filtered)/fs, len(train_filtered)-1),\n train_filtered[1:],\n \"g\", label=\"сигнал с поездом\")\n no_train_ax_label, = sig_trained_ax.plot(\n np.linspace(0, len(no_train_filtered)/fs, len(no_train_filtered)-1),\n no_train_filtered[1:],\n \"r\", label=\"сигнал без поезда\")\n sig_trained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label = label_untrained_ax.scatter(\n untrained_predicted_labels[labels],\n np.array(range(0, len(labels)))[labels],\n color='green', marker='.', label=\"сигнал с поездом\")\n no_train_ax_label = label_untrained_ax.scatter(\n untrained_predicted_labels[np.invert(labels)],\n np.array(range(0, len(labels)))[np.invert(labels)],\n color='red', marker='.', label=\"сигнал без поезда\")\n label_untrained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n train_ax_label = label_trained_ax.scatter(\n predicted_labels[labels],\n np.ma.array(range(0, len(labels)))[labels],\n color='green', marker='.', label=\"сигнал с поездом\")\n no_train_ax_label = label_trained_ax.scatter(\n predicted_labels[np.invert(labels)],\n np.array(range(0, len(labels)))[np.invert(labels)],\n color='red', marker='.', label=\"сигнал без поезда\")\n label_trained_ax.legend(handles=[train_ax_label, no_train_ax_label])\n fig.tight_layout(w_pad=3, h_pad=2,\n rect=[0.0225, 0.0225, 0.95, 0.95])\n #plt.show()\n plt.savefig(pic_name)\n with open(file_name, \"w\") as f:\n f.write(str(model_obj))\n\n for i in range(0, 20):\n path = \"tmp/%i\" % i\n if not 
os.path.exists(\"%s/models\" % path):\n os.makedirs(\"%s/models\" % path)\n eval_optimizer(\n path,\n models.DefaultModel,\n tf.keras.optimizers.Adagrad(learning_rate=0.001),\n 150,\n False,\n )", "def finetune_depth():\n start_depth = 3\n tol = 10E-4\n best_depth = start_depth\n acc = [-1]\n for i in tqdm(range(20),desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n XGBCla = get_XGBmodel(depth=i+start_depth)\n XGBCla.fit(X_train, y_train)\n pred = XGBCla.predict(X_test)\n acc.append(accuracy_score(y_test, pred))\n if (abs(acc[i]-acc[i+1])<tol):\n break\n if (acc[i]<acc[i+1]):\n best_depth = start_depth + i\n print(\"Accuracy: %.4f\" % acc[-1])\n print(\"Best depth: %d\" % best_depth)", "def optimize(self, args):\n\n ############################\n # Get the data\n ############################\n\n # Read the CSV file ignoring the header and the first column (which\n # contains the file name of the image used for extracting the data in\n # a row)\n try:\n data = np.genfromtxt(args.featuresFile, delimiter=',',\n skip_header=1)\n data = data[:, 1:]\n except:\n print('Could not read CSV file: {}'.format(args.featuresFile))\n return -1\n\n x = data[:, :-1]\n y = np.squeeze(data[:, -1:])\n\n ############################\n # Execute the optimization\n ############################\n\n tunningParams = [\n {\n 'kernel': ['linear'],\n 'C': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3]\n },\n {\n 'kernel': ['rbf'],\n 'gamma': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3],\n 'C': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3]\n },\n ]\n\n scores = ['precision', 'recall']\n\n for score in scores:\n print('# Tuning hyper-parameters for {}\\n'.format(score))\n\n clf = GridSearchCV(svm.SVC(C=1), tunningParams, cv=5,\n scoring=format('{}_macro'.format(score)))\n clf.fit(x, y)\n\n print('Best parameters set found on development set:\\n')\n print(clf.best_params_)\n\n print('\\nGrid scores on development set:\\n')\n means = clf.cv_results_['mean_test_score']\n stds = clf.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print('{:.3f} (+/-{:.3f}) for {}'.format(mean, std * 2, params))\n\n #print('\\nDetailed classification report:\\n')\n #print('The model is trained on the full development set.')\n #print('The scores are computed on the full evaluation set.\\n')\n #y_true, y_pred = y_test, clf.predict(X_test)\n #print(classification_report(y_true, y_pred))\n #print()\n\n return 0", "def update(self,x,y):\n if self.ncores > 1:\n # parallel updates\n pass # FIXME\n else:\n # sequential updates\n for tree in self.forest:\n tree.update(x,y)", "def example():\n Optimizer = BFGS(f, g)\n startPoint = 100 * numpy.ones(2);\n res = Optimizer.optimize(startPoint,\n epsilon=1e-5,\n maxIterations=10)\n print res\n pass", "def main():\n\tdata = load_dataset()\n\tdata = normalize_data(data, cols_to_norm)\n\ttrain, test = generate_train_testset(data)\n\n\tX_train = train.drop(['Time', 'EVENT'], axis=1).dropna(axis=0)\n\ty_train = train.dropna(axis=0)['EVENT']\n\n\tX_test = test.drop(['Time', 'EVENT'], axis=1).dropna(axis=0)\n\ty_test = test.dropna(axis=0)['EVENT']\n\n\tmodel = XGBClassifier(n_estimators=1000, random_state=42)\n\tmodel.fit(X_train, y_train)\n\n\tprint(model)\n\n\ty_pred = model.predict(X_test)\n\tpredictions = [round(value) for value in y_pred]\n\n\taccuracy = accuracy_score(y_test, predictions)\n\tprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\n\tf1 = f1_score(y_test, y_pred)\n\tprint(\"F1: %.6f%%\" % (f1))", "def fill(self):\n # Fail fast if num_classes or num_features 
isn't set.\n _ = getattr(self, 'num_classes')\n _ = getattr(self, 'num_features')\n\n self.training_library_base_dir = getattr(\n self, 'training_library_base_dir', '')\n self.inference_library_base_dir = getattr(\n self, 'inference_library_base_dir', '')\n\n self.bagged_num_features = int(self.feature_bagging_fraction *\n self.num_features)\n\n self.bagged_features = None\n if self.feature_bagging_fraction < 1.0:\n self.bagged_features = [random.sample(\n range(self.num_features),\n self.bagged_num_features) for _ in range(self.num_trees)]\n\n self.regression = getattr(self, 'regression', False)\n\n # Num_outputs is the actual number of outputs (a single prediction for\n # classification, a N-dimenensional point for regression).\n self.num_outputs = self.num_classes if self.regression else 1\n\n # Add an extra column to classes for storing counts, which is needed for\n # regression and avoids having to recompute sums for classification.\n self.num_output_columns = self.num_classes + 1\n\n # Allow each tree to be unbalanced by up to a factor of 2.\n self.max_depth = (self.max_depth or\n int(2 * math.ceil(math.log(self.max_nodes, 2))))\n\n # The Random Forest literature recommends sqrt(# features) for\n # classification problems, and p/3 for regression problems.\n # TODO(thomaswc): Consider capping this for large number of features.\n self.num_splits_to_consider = (\n self.num_splits_to_consider or\n max(10, int(math.ceil(math.sqrt(self.num_features)))))\n\n # max_fertile_nodes doesn't effect performance, only training speed.\n # We therefore set it primarily based upon space considerations.\n # Each fertile node takes up num_splits_to_consider times as much\n # as space as a non-fertile node. We want the fertile nodes to in\n # total only take up as much space as the non-fertile nodes, so\n num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))\n # But always use at least 1000 accumulate slots.\n num_fertile = max(num_fertile, 1000)\n self.max_fertile_nodes = self.max_fertile_nodes or num_fertile\n # But it also never needs to be larger than the number of leaves,\n # which is max_nodes / 2.\n self.max_fertile_nodes = min(self.max_fertile_nodes,\n int(math.ceil(self.max_nodes / 2.0)))\n\n # We have num_splits_to_consider slots to fill, and we want to spend\n # approximately split_after_samples samples initializing them.\n num_split_initializiations_per_input = max(1, int(math.floor(\n self.num_splits_to_consider / self.split_after_samples)))\n self.split_initializations_per_input = getattr(\n self, 'split_initializations_per_input',\n num_split_initializiations_per_input)\n\n # If base_random_seed is 0, the current time will be used to seed the\n # random number generators for each tree. 
If non-zero, the i-th tree\n # will be seeded with base_random_seed + i.\n self.base_random_seed = getattr(self, 'base_random_seed', 0)\n\n return self", "def run(self, fitness_function):\r\n return run_helper(depth, fitness_function, 0, 0, 0) # fixme\r", "def __init__(self, n_samples=1000, n_features=4):\n self.n_samples = 1000\n self.n_features = 4\n self.forest = []", "def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight", "def main():\n # Load and preprocess the data.\n ((x_train, y_train), (x_test, y_test)) = (\n preprocess_dataset(x, y) for (x, y) in datasets.mnist.load_data()\n )\n (_, n_classes) = y_train.shape\n\n # Create and fit the original model.\n model = create_model(\n LAYER_SIZES,\n n_classes,\n layer_fn=functools.partial(layers.Dense, use_bias=False),\n layer_kwargs_fn=dense_kwargs,\n )\n model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=N_EPOCHS)\n\n # Evaluate the pruning methods.\n pruning_methods = [(\"weight\", prune_weights), (\"neuron\", prune_neurons)]\n for (method_name, pruning_fn) in pruning_methods:\n # Compute the evaluation curves.\n (accuracy_curve, time_curve) = zip(\n *[\n evaluate_fraction(pruning_fn, model, (x_test, y_test), fraction)\n for fraction in PRUNING_FRACTIONS\n ]\n )\n # Plot them.\n for (subplot, curve) in [(211, accuracy_curve), (212, time_curve)]:\n plt.subplot(subplot)\n plt.plot(PRUNING_FRACTIONS, curve, label=method_name)\n\n if method_name == \"neuron\":\n # The first evaluated model in neuron pruning is the unpruned\n # one. 
Use it as a baseline.\n (baseline, *_) = curve\n plt.plot((0, 1), (baseline, baseline), \"--\", label=\"unpruned\")\n\n # Add some labels to the plots.\n for (subplot, y_label) in [(211, \"accuracy\"), (212, \"inference time\")]:\n plt.subplot(subplot)\n plt.xlabel(\"pruning fraction\")\n plt.ylabel(y_label)\n plt.legend()\n plt.show()", "def extra_trees_test(n_jobs=1):\n # model = models.RandomForest.ExtraTreesModel()\n # model.run('cv')\n\n # tune the model - 15 trees already gives .13 RMSE, I think that's slightly better than RF with that number of trees\n params = {\n 'n_estimators': [15, 50, 100, 250]\n }\n model = models.RandomForest.ExtraTreesModel(\n grid_search_parameters=params,\n grid_search_sample=0.5,\n n_jobs=n_jobs\n )\n model.run('grid_search', refit=True)\n # 2014-01-21 05:45:28 - Base - INFO - Found best parameters:\n # 2014-01-21 05:45:28 - Base - INFO - {'n_estimators': 250}\n # 2014-01-21 05:45:28 - Base - INFO - Predicting on holdout set\n # 2014-01-21 05:45:41 - classes - INFO - RMSE: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - RMSE on holdout set: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - Grid search completed in 8916.21896791\n # 2014-01-21 05:45:41 - Base - INFO - Model completed in 9332.45440102\n\n # As expected, more trees = better performance. Seems like the performance is on par/slightly better than random forest", "def randomforest_fit(self, n_estimators: int = 100, max_depth: int = None, min_samples_split: int = 2):\r\n self.rfModel = RandomForestClassifier(\r\n n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split).fit(self.x, self.y)", "def evaluate(model, x_test, y_test,y_predict):\n \n \n print( \"================================================================================\")\n print (\"Summary of Results:\")\n \n print (\"Forest Size :\" , FOREST_SIZE)\n \n print(\"Accuracy Score: \",accuracy_score(y_test,y_predict))\n print(\"Mse: \",mean_squared_error(y_test,y_predict))\n #average_precision = average_precision_score(y_test, y_predict)\n #print(average_precision)\n \n #fpr, tpr, thresholds = roc_curve(y_test, y_predict, pos_label=2)\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_predict, pos_label=2)\n print(\"auc\",metrics.auc(fpr, tpr))\n \n \n \n #print (\"ROC : \", roc_curve(y_test,y_predict))\n #print (\"AUC : \", auc(y_test,y_predict,reorder=False))\n \n\n \n print (\"================================================================================\")\n #print(average_precision=average_precision_score(Y_test, y_predict))\n #print(\"Average precision-recall score:\", average_precision)\n print()\n print (\"================================================================================\")\n print( \"Feature importance for Random Forest Classifier:\\n\")\n names=['client_id','host_name','page_path','click_time']\n print (sorted(zip(map(lambda x: round(x, 4), model.feature_importances_), names), reverse=True))\n \n print (\"================================================================================\")\n print (\"Done with evaluation\")\n return None", "def main():\n \n # 1. Learn a decision tree from the data in training.txt\n print \"--Building trees--\"\n train_examples = read_file('training.txt')\n print(train_examples)\n attrs = range(len(train_examples[0])-1)\n rand_tree = decision_tree_learning(train_examples, attrs, use_gain=False)\n gain_tree = decision_tree_learning(train_examples, attrs, use_gain=True)\n print \"--Done building--\"\n print\n\n # 2. 
Document the tree you got\n print \"--Random tree--\"\n print_tree(rand_tree)\n print\n print \"--Learn tree--\"\n print_tree(gain_tree)\n print\n\n # 3. Classify all examples in the test-set\n test_examples = read_file('test.txt')\n print \"--Testing random tree--\"\n test(rand_tree, test_examples, attrs)\n print\n print \"--Testing information gain tree--\"\n test(gain_tree, test_examples, attrs)\n print \"--Done testings--\"", "def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())", "def run(self):\n\n # init\n base_value = self._problem.evaluate()\n self._problem.set_as_best(base_value)\n\n # init iteration (used to count the amount of iterations)\n iteration = 0\n\n # add to data\n self._data_append(self.data, iteration, base_value, base_value)\n\n # init termination criterion\n self._termination_criterion.check_first_value(base_value)\n self._termination_criterion.start_timing()\n\n # main loop\n while self._termination_criterion.keep_running():\n\n # search the neighbourhood for the best move\n best_found_delta = self._best_found_delta_base_value\n best_found_move = None\n\n for move in self._problem.get_moves():\n\n # check quality move\n delta = self._problem.evaluate_move(move)\n\n # checks how the move alters the current state\n diff = self._diff(move)\n\n # if not in tabu list --> not similar to earlier performed\n # moves --> if delta better than old best move\n # --> becomes the best move\n\n if not self._tabu_list.contains(diff) and \\\n self._is_better(best_found_delta, delta):\n best_found_delta = delta\n best_found_move = move\n best_found_diff = diff\n\n # the best found move will be used as the next move\n # alter state problem\n base_value = base_value + best_found_delta\n\n # check if a move was found\n if best_found_move is not None:\n\n self._problem.move(best_found_move)\n\n # if better than best found --> new best_found\n if self._is_better(self._problem.best_order_value,\n base_value):\n self._problem.set_as_best(base_value)\n # log the better solution\n self._log_improvement(base_value)\n\n # add diff to tabu list\n self._tabu_list.add(best_found_diff)\n\n # add to data\n self._data_append(self.data, iteration,\n base_value, self._problem.best_order_value)\n\n self._termination_criterion.check_new_value(base_value)\n\n # functions _termination_criterion called\n self._termination_criterion.check_new_value(base_value)\n\n else:\n # no move found --> we're stuck --> break loop\n break\n\n iteration += 1\n self._termination_criterion.iteration_done()\n\n # last data point\n self._data_append(self.data, iteration, base_value,\n self._problem.best_order_value)\n\n # if we have data:\n # convert data to something easier to plot\n if self.data is not None:\n\n # convert to tuple of list\n data = convert_data(self.data)\n\n # make namedtuple\n DataAsLists = namedtuple(\n 'Data', ['time', 
'iteration', 'value', 'best_value'])\n\n data = DataAsLists(data[0], data[1], data[2], data[3])\n\n else:\n data = None\n\n # return results\n\n Results = namedtuple('Results', ['best_order', 'best_value', 'data'])\n\n return Results(self._problem.best_order,\n self._problem.best_order_value,\n data)", "def run_my_stack():\r\n\r\n X_train, y_train, X_test = load_features()\r\n fit_funcs = list()\r\n predict_funcs = list()\r\n configs = list()\r\n MAX_ROUND = 3\r\n\r\n # lgb\r\n num_leaves = [31, 41, 51, 61, 71, 81, 91]\r\n feature_fractions = [0.4, 0.4, 0.4, 0.3, 0.3, 0.3, 0.3]\r\n for i in range(len(num_leaves)):\r\n lgb_config = LGB_Config()\r\n lgb_config.params['num_leaves'] = num_leaves[i]\r\n lgb_config.params['feature_fraction'] = feature_fractions[i]\r\n lgb_config.seed = np.random.randint(0, 10000)\r\n lgb_config.save_model_path = None\r\n # lgb_config.max_round = MAX_ROUND\r\n configs.append(lgb_config)\r\n fit_funcs.append(lgb_fit)\r\n predict_funcs.append(lgb_predict)\r\n\r\n max_depths = [6, 7]\r\n colsample_bytrees = [0.7, 0.6]\r\n for i in range(len(max_depths)):\r\n xgb_config = XGB_Config()\r\n xgb_config.params['max_depth'] = max_depths[i]\r\n xgb_config.params['colsample_bytree'] = colsample_bytrees[i]\r\n xgb_config.seed = np.random.randint(0, 10000)\r\n xgb_config.save_model_path = None\r\n # xgb_config.max_round = MAX_ROUND\r\n configs.append(xgb_config)\r\n fit_funcs.append(xgb_fit)\r\n predict_funcs.append(xgb_predict)\r\n\r\n # cgb\r\n max_depths = [8]\r\n for i in range(len(max_depths)):\r\n cgb_config = CGB_Config()\r\n cgb_config.params['depth'] = max_depths[i]\r\n cgb_config.seed = np.random.randint(0, 10000)\r\n cgb_config.save_model_path = None\r\n # cgb_config.max_round = MAX_ROUND\r\n configs.append(cgb_config)\r\n fit_funcs.append(cgb_fit)\r\n predict_funcs.append(cgb_predict)\r\n\r\n X_train_stack, y_train_stack, X_test_stack = my_stacking(fit_funcs, predict_funcs, configs, X_train, y_train,\r\n X_test)\r\n result_path = 'result/my_stack_result-{}.csv'.format(time.strftime(\"%m%d-%H%M%S\"))\r\n y_pred_prob = final_fit_predict(X_train_stack, y_train_stack, X_test_stack, save_result_path=result_path)\r\n return y_pred_prob", "def run(plot, df, savedir, raw_box_savedir, rgb_pool=None, saved_model=None, deepforest_model=None):\n from deepforest import deepforest\n\n #create deepforest model\n if deepforest_model is None:\n if saved_model is None:\n deepforest_model = deepforest.deepforest()\n deepforest_model.use_release()\n else:\n deepforest_model = deepforest.deepforest(saved_model=saved_model)\n \n #Filter data and process\n plot_data = df[df.plotID == plot]\n predicted_trees, raw_boxes = process_plot(plot_data, rgb_pool, deepforest_model)\n \n #Write merged boxes to file as an interim piece of data to inspect.\n predicted_trees.to_file(\"{}/{}_boxes.shp\".format(savedir, plot))\n raw_boxes.to_file(\"{}/{}_boxes.shp\".format(raw_box_savedir, plot))", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 
'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def apply(self, fgraph):\r\n pass", "def _segmentation(self):\n ab_list = [\"Apthae\", \"Ulcer\", \"Bleeding\", \"Lymphangectasias\", \"Angioectasias\",\n \"Polypoids\", \"ChylousCysts\", \"Stenoses\", \"Voedemas\"]\n\n for i in range(8):\n os.makedirs(self.root + \"/step_3/Segmentation/\" + ab_list[i])\n if len(os.listdir(self.root + '/step_2/'+ ab_list[i])) != 0:\n _t = _test(\n self.root + \"/step_2/\"+ ab_list[i],\n self.root + \"/step_3/Segmentation/\" + ab_list[i],\n f\"{self.cwd}/step3/Abnormality_{i}/best_weights.pth.tar\",\n step=3,\n )\n # print('Abnormality: ', i)\n _t._predict()\n \n else:\n continue \n\n print(\"Segmentation of abnormalities --> DONE\")", "def main():\n\n # path of model that should be pruned\n model_path = ('saved_models/PATH_TO_MODEL/model.h5')\n\n # weights below this threshold will be set to zero\n # thresholds can be defined per layer\n thresholds = [0.03, 0.01, 0.01]\n\n # specify training epochs for retraining\n epochs = [1, 1, 1]\n # define the layer index that should be pruned\n # only feedforward layers can be pruned!!!\n layers = [3, 4, 5]\n\n # TrainingData section\n # specify input dimension of the sliding window using 'slice_len'\n slice_len = 30\n\n # output delay for AREUS data\n delay = 6\n\n td1 
= TrainingData()\n training_data = td1.window_dim_1_sized_td(slice_len, delay)\n\n # Pruning runs for each layer\n p_run = PruningRun(model_path, training_data)\n for i, layer in enumerate(layers):\n p_run.prune_layer(layer, thresholds[i], epochs[i])\n\n # when no retraining is needed\n #p_run.prune_layer_no_retraining(layer, thresholds[i])", "def RForest(x, y, s):\n usx = np.array(x)\n usy = np.array(y)\n\n # split data into train and validation set\n x_train, x_test, y_train, y_test = train_test_split(usx, usy, test_size=s)\n clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n clf.fit(x_train, y_train)\n y_predict = clf.predict(x_test)\n\n # select only the probabilities of being fraud\n y_pred_prob = clf.predict_proba(x_test)[:, 1]\n return y_predict, y_test, y_pred_prob", "def run_fasttree_raxml_survived_best_garli(working_dir, seqdb, run_id, fasta_file, number_of_sequences, base_seq_name, raxml_kill_rate, raxml_bfgs, raxml_model_optimization_precision, raxml_num_runs, garli_num_runs, garli_attachmentspertaxon, garli_stoptime, email, machines):\n runner = FasttreeRaxmlSurvivedBestGarli(\n working_dir=working_dir, wait_timeout=600, seqdb=seqdb, run_id=run_id, fasta_file=fasta_file, number_of_sequences=number_of_sequences, base_seq_name=base_seq_name,\n raxml_settings={\"kill_rate\": raxml_kill_rate, \"bfgs\": raxml_bfgs, \"model_optimization_precision\": raxml_model_optimization_precision, \"num_runs\": raxml_num_runs},\n garli_settings={\"num_runs\": garli_num_runs, \"attachmentspertaxon\": garli_attachmentspertaxon, \"stoptime\": garli_stoptime},\n email=email, machines=machines)\n while not runner.finished():\n runner.iteration()\n return runner.results()\n\n # run_id = run_id.replace(' ', '-').replace('/', '-') # raxml cannot handle spaces and slashes in run-id\n # save_settings(working_dir=working_dir, run_id=run_id, mode=\"fasttree-survived-best\", fasta_file=Path(fasta_file).resolve(), number_of_sequences=number_of_sequences, base_seq_name=base_seq_name, raxml_bfgs=raxml_bfgs, raxml_model_optimization_precision=raxml_model_optimization_precision, raxml_num_runs=raxml_num_runs, garli_num_runs=garli_num_runs, garli_attachmentspertaxon=garli_attachmentspertaxon, garli_stoptime=garli_stoptime, email=email, machines=machines)\n # r_fasttree = run_fasttree(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, email=email, machines=machines)\n # r_raxml = run_raxml_survived(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, source_tree=r_fasttree.best_tree(), base_seq_name=base_seq_name, raxml_kill_rate=raxml_kill_rate, raxml_bfgs=raxml_bfgs, raxml_model_optimization_precision=raxml_model_optimization_precision, raxml_num_runs=raxml_num_runs, email=email, machines=machines)\n # r_garli = run_garli(working_dir=working_dir, run_id=run_id, fasta_file=fasta_file, tree=r_raxml.best_tree(), garli_num_runs=garli_num_runs, garli_attachmentspertaxon=garli_attachmentspertaxon, garli_stoptime=garli_stoptime, email=email, machines=machines)\n # return make_results(working_dir=working_dir, r_raxml=r_raxml, r_garli=r_garli, seqdb=seqdb)", "def run(self, scheduler=\"single-threaded\"):\n _ = dask.compute(self.leaves, scheduler=scheduler)\n # when dask goes thru the tree, it knows the full sequence of ops\n # needed to compute each leaf, so this gives dask full authority in\n # determining the best dispatch path.", "def __init__(self, X_init: np.ndarray, Y_init: np.ndarray, num_trees: int = 30,\n do_bootstrapping: bool = True, n_points_per_tree: int = 0, 
seed: int = None) -> None:\n super().__init__()\n\n # Set random number generator for the random forest\n if seed is None:\n seed = np.random.randint(10000)\n self.reg_rng = reg.default_random_engine(seed)\n\n self.n_points_per_tree = n_points_per_tree\n\n self.rf = reg.binary_rss_forest()\n self.rf.options.num_trees = num_trees\n\n self.rf.options.do_bootstrapping = do_bootstrapping\n\n self.rf.options.num_data_points_per_tree = n_points_per_tree\n\n self._X = X_init\n self._Y = Y_init\n\n if self.n_points_per_tree == 0:\n self.rf.options.num_data_points_per_tree = X_init.shape[0]\n\n data = reg.default_data_container(self._X.shape[1])\n\n for row_X, row_y in zip(X_init, Y_init):\n data.add_data_point(row_X, row_y)\n\n self.rf.fit(data, self.reg_rng)", "def randomforest_cv(self, nsplits: int = 5) -> (float, float, float):\r\n params = {\r\n \"n_estimators\": [20, 50, 100, 200],\r\n \"max_depth\": [2, 3, 5, 8, 10, 15, 20],\r\n }\r\n model = RandomForestClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(self.x, self.y)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = RandomForestClassifier(**best_params).fit(x_train, y_train)\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def fit(self, x: np.array, t: np.array, y: np.array) -> None:\n\n self.forest.fit(x, y)", "def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n iterInput = '5.10.20' \r\n xdim = 10\r\n ydim = 10\r\n dist = 2\r\n wild = 0.75\r\n prune = 1\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n GALEConstants.setConstants(prune, wild)\r\n e = GALE_Environment(trainData,testData,bitLength)\r\n sampleSize = e.getNrSamples()\r\n gale = GALE(e, outProg, outPop, bitLength, CVpartitions, graphPerformance, xdim, ydim, dist)\r\n \r\n #Set some GALE parameters.\r\n if trackCycles == 'Default':\r\n gale.setTrackingIterations(sampleSize)\r\n else:\r\n gale.setTrackingIterations(trackCycles) \r\n gale.setNumberOfTrials(lastIter, iterList) \r\n \r\n #Run the GALE Algorithm \r\n gale.runGALE()", "def optimization_parameters():\n param_distributions = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_features\": [\"auto\", \"log2\"],\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n param_grid = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n\n rfc = 
RandomForestClassifier()\n\n # 5 * 10 * 9 * 5 * 2 = 4500 iterations\n # will take a lot of time\n model = GridSearchCV(\n estimator=rfc,\n param_grid=param_grid,\n scoring=\"accuracy\",\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n # initiates Randomized Search \n model = RandomizedSearchCV(\n estimator=rfc,\n param_distributions=param_distributions,\n n_iter=20,\n scoring='accuracy',\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n \n # fit and predict the model\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n \n # define evaluation metric as accuracy score\n acc = accuracy_score(y_test, pred) * 100\n print(f\"RandomForestClassifier with GridSearchCV: {acc:0.2f}%\")\n print(\"Best parameters set:\")\n\n # extract best parameters \n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(f\"\\t{param_name}: {best_parameters[param_name]}\")", "def randomForestClassifier(self, train_cols, test_cols, targets, feature_selction_var, min_abundance_threshold, shuffle=False):\n from sklearn.ensemble import RandomForestClassifier\n #from sklearn.ensemble import RandomForestRegressor\n \n #train = self.abundance_df.loc[:,train_cols] #train.as_matrix(cols)\n train = self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols] #train.as_matrix(cols)\n #test = self.abundance_df.loc[:,test_cols] #.as_matrix(test_cols)\n test = self.abundance_df[self.abundance_df['masked']==False].loc[:,test_cols] #.as_matrix(test_cols)\n #names = list(self.abundance_df.loc[:, 'species'])\n names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'])\n \n #most_common_species_set = set()\n #for col in train_cols:\n # sorted_series = self.abundance_df.loc[:, col].sort_values(ascending=False)[:100]\n # most_common_species_set |= set(list(sorted_series.index))\n #most_common_species_list = []\n #for id0 in most_common_species_set:\n # #print(max(self.abundance_df.loc[id0,train_cols]))\n # if max(self.abundance_df.loc[id0,train_cols]) >= min_abundance_threshold:\n # most_common_species_list.append(id0)\n ##print(len(most_common_species_list))\n #most_common_species_set = set(most_common_species_list)\n #train = train.loc[list(most_common_species_set),:]\n #test = test.loc[list(most_common_species_set),:]\n #names = list(self.abundance_df.loc[list(most_common_species_set),'species'])\n \n #feature selection by variance\n from sklearn.feature_selection import VarianceThreshold\n sel = VarianceThreshold(threshold=(0.999 * (1 - 0.999))) \n if feature_selction_var:\n #ds1 = np.transpose(ds10.as_matrix())\n #ds1 = sel.fit_transform(np.transpose(ds10.as_matrix()))\n #ds2 = np.transpose(ds20.as_matrix())\n #train = sel.fit_transform(np.transpose(train.as_matrix()))\n train = sel.fit_transform(np.transpose(train.values))\n \n #names = list(self.abundance_df.loc[:, 'species'].as_matrix()[sel.get_support()])\n #names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].as_matrix()[sel.get_support()])\n names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].values[sel.get_support()])\n #test = sel.fit_transform(np.transpose(test.as_matrix()))\n test = sel.fit_transform(np.transpose(test.values))\n ds10 = np.asmatrix(train)[[i for i, j in enumerate(targets) if j == 0],:]\n ds1 = np.transpose(sel.fit_transform(np.transpose(ds10)))\n else:\n\n #train = np.transpose(train.as_matrix())\n train = np.transpose(train.values)\n #test = np.transpose(test.as_matrix())\n test = np.transpose(test.values)\n 
ds10 = train.iloc[:,[i for i, j in enumerate(targets) if j == 0]]\n #ds1 = np.transpose(ds10.as_matrix())\n ds1 = np.transpose(ds10.values)\n\n if shuffle == 'index':\n from random import shuffle\n shuffle(names)\n\n #rf = RandomForestClassifier(n_estimators=10)\n target = targets \n #group1 = list(self.abundance_df.loc[:,train_cols].columns[:target.count(0)])\n group1 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[:target.count(0)])\n #group2 = list(self.abundance_df.loc[:,train_cols].columns[target.count(0):])\n group2 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[target.count(0):])\n\n #rf = RandomForestRegressor(n_estimators=1000)#, class_weight=\"balanced\")\n rf = RandomForestClassifier(n_estimators=1000) # bootstrap=False\n #, max_features=100)#, min_sample_leaf=50)\n #rf = RandomForestRegressor(n_estimators=20, max_features=2)\n #class_weight=\"balanced\" #{class_label: weight}\n #n_estimators=1000,\n rf.fit(train, target)\n \n #from sklearn.metrics import roc_auc_score\n #for l in leaf:\n #model = RandomForestRegressor(min_samples_split=2, max_depth=None, bootstrap=False, min_samples_leaf=2)\n # #n_estimator=200, oob_score=True, min_samples_leaf=10,max_features=f, \n #model.fit(train,target)\n # #print(\"AUC - ROC : \")\n # #print(roc_auc_score(target,model.oob_prediction_))\n # #print(model.feature_importances_)\n \n #from sklearn.ensemble import ExtraTreesClassifier\n #model = ExtraTreesClassifier()\n #model.fit(train, target)\n \n from treeinterpreter import treeinterpreter as ti\n prediction, bias, contributions = ti.predict(rf, np.array(train))\n \n #for i in range(len(train)):\n # j = 0\n # # print(i)\n # #print(\"\\tBias (trainset mean)\")\n # #print(bias[i])\n # # print(contributions[0][0])\n # #for c, feature in sorted(zip(contributions[i], \n # # names), \n # # #self.abundance_df.index), \n # # key=lambda x: -abs(x[0])):\n # for c, feature in zip(contributions[i], list(self.abundance_df.index)):\n # if c[0] != 0:\n # #print feature, ':\\t', \"{:.2e}\".format(c), '\\t', self.abundance_df.loc[feature, 'species']\n # if j <10:\n # # print()'\\t' + self.abundance_df.loc[feature, 'species'], '\\t', \"{:.2e}\".format(c[0]))\n # j += 1\n totalc = np.mean(contributions, axis=0) \n \n #from sklearn import model_selection\n #from sklearn.model_selection import cross_val_score\n #clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)\n #scores = cross_val_score(clf, X, y)\n \n ##compare 2 groups of samples\n prediction1, bias1, contributions1 = ti.predict(rf, np.array(ds1))\n\n mean_contri = [0 for i in xrange(len(names))]\n for s in xrange(len(ds1)):\n for i in xrange(len(names)):\n mean_contri[i] += contributions1[s][i][0]\n mean_contri = [x/len(ds1)for x in mean_contri]\n \n names_list = []\n #for c, org in sorted(zip(mean_contri, list(self.abundance_df.loc[:,'species'])), reverse=True):\n for c, org in sorted(zip(mean_contri, names), reverse=True):\n if c != 0:\n #print(self.abundance_df.loc[i,group1])\n #idx = self.abundance_df[self.abundance_df['species'] == org].index.tolist()[0]\n idx = self.abundance_df[self.abundance_df['masked']==False][self.abundance_df['species'] == org].index.tolist()[0]\n if shuffle:\n #print(names.index(org))\n #idx = list(self.abundance_df.index)[names.index(org)]\n idx = list(self.abundance_df[self.abundance_df['masked']==False].index)[names.index(org)]\n #maximum = max(self.abundance_df.loc[idx,group1 + group2])\n maximum = 
max(self.abundance_df[self.abundance_df['masked']==False].loc[idx,group1 + group2])\n #print(str(round(c, 3)) + '\\t' + org + '\\t' + str(round(maximum,3)))\n names_list.append([round(c, 3), org, round(maximum,3)])\n \n return names_list", "def main():\n\n # choose number of data-points and sample a pair of vectors: the input\n # values and the corresponding target values\n N = 500\n inputs, targets = sample_data(N, arbitrary_function_2, seed=1)\n\n # specify the centres and scale of some rbf basis functions\n default_centres = np.linspace(0,1,21)\n default_scale = 0.03\n default_reg_param = 0.08\n\n # get the cross-validation folds\n num_folds = 4\n folds = create_cv_folds(N, num_folds)\n\n # evaluate then plot the performance of different reg params\n evaluate_reg_param(inputs, targets, folds, default_centres, default_scale)\n # evaluate then plot the performance of different scales\n evaluate_scale(inputs, targets, folds, default_centres, default_reg_param)\n # evaluate then plot the performance of different numbers of basis\n # function centres.\n evaluate_num_centres(\n inputs, targets, folds, default_scale, default_reg_param)\n\n plt.show()", "def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... 
just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def run_algorithm(self):\n population_size = self.population_size\n simulator = self.simulator\n num_generations = self.num_generations\n current_dir = os.getcwd()\n urdf = current_dir + os.sep + os.path.join(\"URDF\", \"Ghost\", \"urdf\", \"Ghost.urdf\")\n simulated_robot = Robot(urdf, (0, 0, 0.4))\n simulated_robot.set_id(simulator.load_new_robot_urdf(simulated_robot))\n # make placeholders\n counter = 0\n best_genome = None\n best_fit = 
0\n evals = population_size * (num_generations + 1)\n beam_fit = np.zeros(evals)\n current_population = self.make_population()\n current_population_fitness = [0] * self.population_size\n # print(\"build robots\")\n for k in range(self.population_size):\n #\tprint(\"initial robot \" , k)\n robot = current_population[k]\n simulator.load_robot_parameters(robot.parameters, 0)\n robot.set_fitness(simulator.compute_walk_fitness(1000)[0]) # evaluate the robot's fitness\n fitness = robot.get_fitness()\n current_population_fitness[k] = fitness\n \n if counter == 0:\n beam_fit[counter] = current_population_fitness[k] \n else:\n \n if beam_fit[counter - 1] < current_population_fitness[k]: # if the best overall robot thus far\n best_genome = robot.genome.copy() # update the best robot's genome\n beam_fit[counter] = current_population_fitness[k] \n else:\n beam_fit[counter] = beam_fit[counter - 1]\n best_fit = beam_fit[counter]\n\n counter +=1 \n\n\n #\tprint(\"origional robots evaluated, their fitness is \" , )\n for i in range(num_generations): # perform mutations equal to num_Climb\n #\t\t\tprint(\"start of gen , current population_fitness\" , current_population_fitness)\n population = current_population.copy()\n population_fitness = current_population_fitness.copy()\n print('gen' , i)\n for j in range(self.population_size):\n robot = population[j]\n mut_loc, old_val = robot.mutate_genome() # Mutation: Keep track of mut location and previous vals\n simulator.load_robot_parameters(robot.parameters, 0)\n robot.set_fitness(simulator.compute_walk_fitness(1000)[0]) # evaluate the robot's fitness\n fit_new = robot.get_fitness()\n population_fitness[j] = fit_new\n # BIG POINT - here we keep regardless if the change is better or not\n if fit_new > best_fit: # update learning curve\n best_fit = fit_new\n best_genome = robot.genome.copy()\n beam_fit[counter] = best_fit\n counter += 1\n #\t\t\tprint(\" ... \")\n #\t\t\tprint(\"end of gen , current population_fitness\" , current_population_fitness)\n # concat the populations and population fitnesses\n total_population = current_population + population\n total_population_fitness = current_population_fitness + population_fitness\n # print(\"before quick sort \" , total_population_fitness)\n # print(\" ... \")\n # sort the lists\n self.quick_sort(total_population_fitness, total_population, 0, len(total_population) - 1)\n # print(\" after quick sort \" , total_population_fitness)\n # print(\" ... 
\")\n # keep the top half\n current_population = total_population[:self.population_size]\n current_population_fitness = total_population_fitness[:self.population_size]\n # print(\"keep \", current_population_fitness)\n #\t\tprint(counter)\n\n if not os.path.exists('./data'):\n os.mkdir('./data')\n\n np.savetxt(\"beam_genome_gen_999_pop_100.csv\", best_genome, delimiter=\",\")\n np.savetxt(\"beam_learning_gen_999_pop_100.csv\", beam_fit, delimiter=\",\")", "def run_algorithm(self):\n print(f\"Checking all possible configurations with {self.algorithm}...\")\n\n if self.algorithm == \"test\" or (self.algorithm == \"greedy\" and\n self.iterations == 1000):\n\n # Test each configuration found with greedy (1000 iterations)\n while True:\n try:\n self.index += 1\n self.batteries = self.load_batteries(self.index)\n\n # Break if all configurations are checked\n except FileNotFoundError:\n break\n self.calculate_cable()\n self.link_houses()\n greedy(self, 1000)\n\n # Load best solution if user wanted to run greedy\n if self.algorithm == \"greedy\":\n self.load()\n self.plot_houses()\n\n # Call correct algorithm\n else:\n self.load()\n if self.algorithm == \"stepdown\":\n stepdown(self)\n elif self.algorithm == \"greedy\":\n greedy(self, self.iterations)\n elif self.algorithm == \"hill\":\n hill_climber(self, self.iterations)\n elif self.algorithm == \"dfs\":\n dfs(self)\n elif self.algorithm == \"random\":\n random_algorithm(self, self.iterations)\n elif self.algorithm == \"bnb\":\n bnb(self)\n\n self.load()\n self.plot_houses()", "def __init__(self,num_trees=100, depth_limit=5, example_subsample_rate=0.4,\n attr_subsample_rate=0.4):\n\n # TODO: finish this.\n self.num_trees = num_trees\n self.depth_limit = depth_limit\n self.example_subsample_rate = example_subsample_rate\n self.attr_subsample_rate = attr_subsample_rate\n self.classifier = RandomForest(self.num_trees, self.depth_limit, self.example_subsample_rate,\n self.attr_subsample_rate)", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i 
in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def grow_trees(self, regrow=False):\n if self.forest == [] or regrow:\n mtry = int(math.floor(math.sqrt(len(self.variables))))\n data, trees, var, pred_index = self.data, self.trees, self.variables, self.prediction_index\n attr_fn, dist_classes, order, imp = self.attr_fn, self.dist_classes, len(self.data), self.importance_fn\n self.forest = random_forest.RandomForest(data, trees, mtry, var, pred_index, attr_fn, dist_classes, order, imp)\n print self.trees, ' have been grown using a set of ', len(self.variables), ' variables.'\n else:\n print \"Already a forest in place, add regrow=True to override.\"", "def main(args = []):\n \"\"\" \n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(\"Starting crazy calculations...\")\n print(\"The {}-th Fibonacci number is {}\".format(args.n, fib(args.n)))\n _logger.info(\"Script ends here\")\n \"\"\"\n\n setup_logging(1)\n _logger.debug(\"Starting crazy calculations...\")\n _logger.info(\"Script ends here\")\n\n sb.set_style(style=\"whitegrid\")\n sb.set_color_codes()\n\n mean = [0, 0]\n cov = [[1, 0], [0, 1]] # diagonal covariance\n Nobjs = 3000\n x, y = np.random.multivariate_normal(mean, cov, Nobjs).T\n # Add manual outlier\n x[0] = 3.3\n y[0] = 3.3\n X = np.array([x, y]).T\n plt.figure(figsize=(7, 7))\n plt.scatter(x, y, s=15, facecolor='k', edgecolor='k')\n\n start = time.time()\n\n F = iso_forest.iForest(X, ntrees=500, sample_size=256)\n S = F.compute_paths(X_in=X)\n\n end = time.time()\n _logger.info(\"Elapsed (with compilation) = %s\" % (end - start))\n\n f, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)\n sb.distplot(S, kde=True, color=\"b\", ax=axes, axlabel='anomaly score')\n\n ss = np.argsort(S)\n plt.figure(figsize=(7, 7))\n plt.scatter(x, y, s=15, c='b', edgecolor='b')\n plt.scatter(x[ss[-10:]], y[ss[-10:]], s=55, c='k')\n plt.scatter(x[ss[:10]], y[ss[:10]], s=55, c='r')\n\n N = 4000\n x2 = 
np.random.rand(N)\n y2 = np.sin(x2 * 10.) + np.random.randn(N) / 2.\n\n x2[0] = 0.4;\n y2[0] = 0.9\n x2[1] = 0.6;\n y2[1] = 1.5\n x2[2] = 0.5;\n y2[2] = -3.\n X2 = np.array([x2, y2]).T\n plt.figure(figsize=(9, 6))\n plt.scatter(x2, y2, c='b', edgecolor='b')\n plt.scatter(x2[:3], y2[:3], c='k')\n plt.ylim(-3.2, 3.2)\n plt.xlim(0, 1)\n\n F2 = iso_forest.iForest(X2, ntrees=500, sample_size=512)\n S2 = F2.compute_paths(X_in=X2)\n f, axes = plt.subplots(1, 1, figsize=(7, 7), sharex=True)\n sb.distplot(S2, kde=True, color=\"b\", ax=axes, axlabel='anomaly score')\n\n ss = np.argsort(S2)\n plt.figure(figsize=(9, 6))\n plt.scatter(x2, y2, c='b', edgecolors='b')\n plt.scatter(x2[ss[-10:]], y2[ss[-10:]], s=55, c='k')\n plt.scatter(x2[ss[:100]], y2[ss[:100]], s=55, c='r')\n\n # plt.show()", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n 
[datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")" ]
[ "0.70378083", "0.6852305", "0.64964634", "0.63519055", "0.6336262", "0.63104767", "0.6238995", "0.6202586", "0.60780305", "0.6055713", "0.6045708", "0.60246295", "0.59518003", "0.5943695", "0.5926124", "0.5907505", "0.5890044", "0.5862114", "0.5828884", "0.58246505", "0.5816776", "0.579766", "0.5788096", "0.5785222", "0.578278", "0.5695299", "0.56788075", "0.5669844", "0.5668587", "0.5587356", "0.5586628", "0.5580691", "0.55719465", "0.5558448", "0.5555041", "0.55472785", "0.5507019", "0.55014163", "0.5500439", "0.54923207", "0.54645294", "0.5456516", "0.5441425", "0.5429942", "0.5427857", "0.5417765", "0.54093295", "0.54004806", "0.53976125", "0.53966486", "0.53962946", "0.53911114", "0.5381802", "0.53798395", "0.5379487", "0.5377953", "0.53755945", "0.5351543", "0.5350487", "0.53427035", "0.53232193", "0.5322636", "0.531726", "0.5316701", "0.5316109", "0.53149813", "0.5312633", "0.53111845", "0.53054637", "0.53038895", "0.5287076", "0.5271271", "0.5270454", "0.5269817", "0.5256324", "0.5256231", "0.52457976", "0.524458", "0.5241098", "0.5235656", "0.5217505", "0.5215487", "0.5192788", "0.5191557", "0.5186414", "0.51835775", "0.51796633", "0.51789474", "0.51775444", "0.5175868", "0.51750255", "0.5174787", "0.51656955", "0.51590466", "0.5151037", "0.5148477", "0.5147761", "0.514097", "0.5128747", "0.5122743" ]
0.61327165
8
1. INITIALISATION PHASE. Generates an initial population of tree(s). The initial population should cover the entire search space as much as possible by uniformly randomizing individuals within the search space constrained by the prescribed lower and upper bounds.
def __init__(self, lower, upper , fun , max_std, min_std , init_numb_trees = 10 , max_numb_trees = 20 , max_seeds = 10 , min_seeds = 1 , epsilon = 0.1 , epsilon_decay = 0.0 , max_iters = 100 , mut_proba = 0.1 , seed = None , ): # generates a seed for the random number generator if (seed == None): self.seed = random.randint(0, 1000) else: self.seed = seed random.seed(self.seed) # assigns properties of FO algorithm self.max_number_trees = max_numb_trees self.max_seeds = max_seeds self.min_seeds = min_seeds self.epsilon = epsilon self.epsilon_decay = epsilon_decay self.max_iters = max_iters self.max_std = max_std self.min_std = min_std self.mut_proba = mut_proba # assigns fitness function self.evaluate = fun # stores lower and upper bounds self.lower = lower self.upper = upper # evaluates dimension of the optimal problem assert ( len(lower)==len(upper) ), \ "'lower' and 'upper' must be of the same dimension." self.dim = len(lower) # initialises a forest of trees self.population = [] for _ in range(init_numb_trees): tree = Tree(lower, upper) if (fun != None): self.population.append((fun(tree.vector), tree)) else: self.population.append((sys.float_info.max, tree)) # initialises iterations counter self.iteration = 1 # creates a seedlings buffer self.seedlings = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_population(pop_size):\n population = []\n for md in range(2, MAX_DEPTH - 1):\n for _ in range(int(pop_size / 2)):\n t = Tree()\n t.random_tree(grow=True, max_depth=md) # Grow method \n population.append(t)\n for _ in range(int(pop_size / 2)):\n t = Tree()\n t.random_tree(grow=False, max_depth=md) # Full method\n population.append(t)\n return population", "def initPopulation(populationSize, initialDepth, assetList):\n\n #list holding all individuals\n population = []\n\n for i in range(populationSize):\n population.append(PortfolioTree(initialDepth, assetList))\n\n return population", "def initial_population(self, size):\n return [self.target_tree] + \\\n [self.mutator.mutate(copy.deepcopy(self.target_tree))\n for i in range(size - 1)]", "def initial_population(target_im, population_size):\r\n # Empty population of chromosomes accoridng to the population size specified.\r\n init_population = numpy.empty(shape=(population_size, \r\n functools.reduce(operator.mul, target_im)),\r\n dtype=numpy.uint8)\r\n for indv_num in range(population_size):\r\n # Randomly generating initial population chromosomes genes values.\r\n init_population[indv_num, :] = numpy.random.random(\r\n functools.reduce(operator.mul, target_im))*256\r\n return init_population", "def init_population(self):\n pass", "def test_seed_initial_population():\n # Test if a initial population can be read in from CSV\n i_population = owp.seed_initial_population('initial_populations.csv')\n # Test if a new population can be generated with i_population as the base\n pop_size = 30\n population = sga.generate_population(pop_size, i_population)\n assert type(i_population) is list\n assert len(population) == pop_size", "def initialise_population(size=10):\n return [Individual(None) for _ in range(size)]", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. 
\"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness", "def generateInitialPopulation(self):\n population = self.evolver.generatePopulation(self.params.populationSize)\n if self.verbose:\n for i in range(len(population)):\n print(str(i) + \": \" + str(population[i]))\n #print(population[i].lsystem.printGlobalDefinesStatus())\n return population", "def initialize_generation(environment, population_size, num_genes):\n\t# initialize all individuals in the population \n\tall_genotypes = np.random.uniform(-1, 1, (population_size, num_genes))\n\tall_sigmas = np.random.uniform(0.001, 0.1, (population_size, num_genes))\n\tgeneration = [Individual(all_genotypes[i], all_sigmas[i]) for i in range(population_size)]\n\n\t# compute fitness of all individuals\n\tfor individual in generation:\n\t\tindividual.fitness = individual.compute_fitness(environment)\n\n\treturn generation", "def _initialize_trees(self):", "def initialPopulation(self):\n\t\tpopulation = []\n\t\t# generate an initial individual, calculate its fitness and add it to our\n\t\t# new population\n\t\tinitIndiv = self.initialIndividual()\n\t\tself.calcIndividualFitness(initIndiv)\n\t\tpopulation.append(initIndiv)\n\t\t\n\t\t# until we have filled the population\n\t\tfor i in range(self.populationSize):\n\t\t\t# keep mutating the initial individual to get new ones\n\t\t\tmutatedIndiv = self.mutateIndividual(initIndiv)\n\t\t\t# if that new individual is in the population, don't add it, try\n\t\t\t# getting a new one\n\t\t\twhile self.isIndividualInPopulation(mutatedIndiv, population) == True:\n\t\t\t\tmutatedIndiv = self.mutateIndividual(initIndiv)\n\t\t\tself.calcIndividualFitness(mutatedIndiv)\n\t\t\tpopulation.append(mutatedIndiv)\n\t\tself.sortPopulation(population)\n\t\treturn population", "def initialPop(popSize,rangeMin,rangeMax,genLength):\n\t\n\tpop=[]\n\n\tfor i in range(popSize):\n\t\tgenome=[]\n\t\tfor j in range(genLength):\n\t\t\tparam=random.uniform(rangeMin,rangeMax)\n\t\t\tgenome.append(param)\n\t\tpop.append(Gen(genome)) #add each random genome to the pop\n\t\t\t\t\n\treturn pop", "def init_population(self, size):\n print(\"Initializing population.\")\n self.population = []\n for _ in range(size):\n self.population.append(Gene(self.tactics))", "def init_population(self):\n 
print('Initializing...')\n for i in range(self.part_num):\n x = Particle()\n # initialize random position\n x.Pos = np.zeros(self.dim)\n for j in range(len(x.Pos)):\n x.Pos[j] = np.random.uniform(self.var_size[j][0], self.var_size[j][1])\n # calculate cost from random parameters\n #print(x.Pos)\n x.Cost = self.objective(x.Pos)\n x.Vel = np.zeros(self.dim)\n x.Best_pos = x.Pos\n x.Best_cost = x.Cost\n self.particle.append(x)\n\n if self.particle[i].Best_cost < self.GlobalBest_Cost:\n self.GlobalBest_Cost = self.particle[i].Best_cost\n self.GlobalBest_Pos = self.particle[i].Best_pos\n self.Best_Cost.append(self.GlobalBest_Cost)\n print('Initialize complete, with best cost =',\n self.GlobalBest_Cost, \n \"\\nTemporary best solution:\", \n self.GlobalBest_Pos)", "def create_initial(pop_num, pop, kd_min, kd_max, kp_min, kp_max, ki_min, ki_max):\n\n for s in range(pop_num):\n #Creating the random PID values\n kd_cur = round(random.uniform(kd_min, kd_max), 2)\n kp_cur = round(random.uniform(kp_min, kp_max), 2)\n ki_cur = round(random.uniform(ki_min, ki_max), 2)\n #Into 2-D List. Access via pop[i][j]\n pop.insert(s, [kd_cur, kp_cur, ki_cur])\n return pop", "def initialize_population(self):\n for i in range(GAConfig[\"initial_population_size\"]):\n new_chromosome = Chromosome(GAConfig[\"num_categories\"])\n for gene in self.phones:\n random_category = randint(0, GAConfig[\"num_categories\"] - 1)\n new_chromosome.insert_into_category(random_category, gene)\n #need to make sure that the chromosome has all categories fixed here.\n\n #adds the restrictions to the categories\n if(GAConfig[\"category_restriction\"] == \"True\"):\n new_chromosome = self.space_chrom(new_chromosome)\n\n self.population.append(new_chromosome)\n\n self.population = self.computeFitness.compute(self.population)\n self._sort()", "def initialise_rng(self):\n\n\t\tself.rng = numpy.random.RandomState()", "def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)", "def initial_population(datapoints_nr, population_size):\n\n # row = members\n population = np.empty(shape=(population_size, datapoints_nr))\n\n for member in range(population_size):\n # each member consist of the indices from 0 to datapoints_nr-1 and is\n # some random assignment between them\n population[member] = random.sample(range(datapoints_nr), datapoints_nr)\n\n return population.astype(int)", "def run(self):\n population_p = self.create_population()\n population_p = self.sort_population(population_p)\n best_x = population_p[0]\n for k in range(self.iteration):\n population_r = []\n # random.shuffle(population_p)\n for i in range(0, self.population_length, 2):\n mother = 0\n father = 1\n children = [self.random_chromosome(), self.random_chromosome()]\n while (mother == father) or (children[0] in population_p) or (children[1] in\n population_p):\n mother = random.randint(0, self.population_length - 1)\n father = random.randint(0, self.population_length - 1)\n children = self.cross(population_p[mother], population_p[father])\n children[0] = self.mutate(children[0])\n children[1] = self.mutate(children[1])\n\n population_r.append(children[0])\n population_r.append(children[1])\n\n population_p = self.new_population(population_p, population_r)\n if self.fitness(population_p[0]) < self.fitness(best_x):\n best_x = population_p[0]\n\n # print(population_p)\n return best_x", "def initialisation(Rsize, config, n_global_in, n_global_out, ke):\n # 
Creating population of Rsize*Rsize new random individuals\n # population = [[Individual(config, n_global_in, n_global_out)]*Rsize for _ in range(Rsize)]\n reef = [Individual(config, n_global_in, n_global_out) for _ in range(Rsize * Rsize)]\n print \"Reef created with \" + str(len(reef)) + \" solutions\"\n print \"Original size: \" + str(len(reef))\n\n # Eval population\n\n reef, count_evaluations = eval_population(reef, ke)\n # for ind in reef:\n # print str(ind.fitness)\n\n # Calculating fitness mean and std deviation\n fitness = fitness_mean_std(reef)\n\n fitness_mean_validation = fitness[\"validation\"][\"mean\"]\n fitness_std_validation = fitness[\"validation\"][\"std\"]\n fitness_max_validation = fitness[\"validation\"][\"max\"]\n fitness_min_validation = fitness[\"validation\"][\"min\"]\n\n # Deleting corals according to formula\n # It is not the same that the depredation one\n # new_population = [[ind if initial_deletion_check(ind.fitness, fitness_mean, fitness_std) else None for ind in line ] for line in population]\n new_reef = [\n ind if initial_deletion_check(ind.fitness[\"accuracy_validation\"], fitness_mean_validation, fitness_std_validation) else None for\n ind in reef]\n\n print \"Population reduced to: \" + str(len(filter(lambda w: w is not None, new_reef))) + \" solutions\"\n\n # for ind in filter(lambda w: w is not None, new_reef):\n # print str(ind.fitness)\n\n return new_reef", "def generate_initial_states(N_init = 100, J_upper = 5, seed = 0):\n init_obs, init_A = simu_Ohio(T = J_upper, N = N_init, \n seed = seed, sd_G = 3, matrix_output = True, is_real = True)\n init_A = init_A.reshape(1, J_upper, N_init)\n initial_states = np.concatenate([init_obs, init_A], 0)\n initial_states = initial_states.reshape((4 * J_upper, N_init), order = \"F\")\n initial_states = initial_states[:(J_upper * 4 - 1), :] \n return initial_states.T", "def rand_init_guess(problem):\n bnds = variable_bounds(problem)\n if bnds is None:\n _n = problem['N']\n return np.random.rand(((_n - 1) * problem['num_states'] + (_n+1) * problem['num_inputs']) * problem['Nv'])\n else:\n bnds = np.array(bnds)\n bnds = np.where(bnds == np.inf, 1, bnds)\n bnds = np.where(bnds == -np.inf, -1, bnds)\n return np.random.uniform(bnds[:, 0], bnds[:, 1])", "def _initialize(self, size=None):\n if size is None:\n size = self.population_size\n\n return [{\n 'individual': self.op.gen_individual(),\n 'fitness': None\n } for _ in range(size)]", "def first_iteration(mat, num_range, sub):\n\t#Creating the tree for the first iteration. \n\ttree = pMatrix.create_tree(mat, num_range, sub)\n\t\n\t#Adding the tree to all_trees. 
\n\tall_trees.append(tree)\n\t\n\t#Calculating the total number of states in the first iteration.\n\tnum_states = tree.get_num_states()\n\t\n\t#Adding total number of states for the first iteration to all_total_states.\n\tall_total_states.append(num_states)\n\t\n\t#Adding all states to be explored in the first iteration to all_states_explored.\n\tfor st in tree.get_all_states():\n\t\tall_states_explored.append(st)\n\t\t\n\t#Adding super states from first tree to super_states.\n\tfor sp in tree.get_super_states():\n\t\tsuper_states.append(sp)\n\t\n\t#Adding results for first iteration to final list.\n\tall_results.append(pMatrix.main(mat, num_range,sub))", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: \",self.best.getFitness())", "def initial_pop(self):\n free_list = list(self.nodes)\n cur_node = random.choice(free_list)\n solution = [cur_node]\n free_list.remove(cur_node)\n while free_list:\n cur_node = random.choice(free_list)\n free_list.remove(cur_node)\n solution.append(cur_node)\n return solution", "def setUp(self):\n N = 10\n pA = 0.5\n pB = 0.5\n qA = 1 - pA\n qB = 1 - pB\n locus_A = (['A'] * int(N * pA)) + (['a'] * int(round(N * qA)))\n locus_B = (['B'] * int(N * pB)) + (['b'] * int(round(N * qB)))\n self.pop = population.Population(N, locus_A, locus_B)", "def create_initial_graph(self):\n # Initialise weights\n for link in self.gene_links:\n link.weight = random.uniform(weight_init_min, weight_init_max)\n # Initialise biases\n for node in self.gene_nodes:\n node.bias = random.uniform(bias_init_min, bias_init_max)\n if node.can_modify:\n node.act_func = self.act_set.get_random_activation_func()\n if node.act_func in [activations.gaussian, activations.sin]:\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)", "def generate_first_population(population_size, mi_per_individual=10):\n\n population = []\n\n while len(population) < population_size:\n\n individual = []\n while len(individual) < mi_per_individual:\n # Get three random intergers 0-9\n m1 = int(random.random() * 9)\n m2 = int(random.random() * 9)\n m3 = int(random.random() * 9)\n\n miller_indices = [m1, m2, m3]\n \n # Make sure [0, 0, 0] is not generated!\n if m1 == m2 == m3 == 0:\n print(\"h, k, l = 0 !!!\")\n miller_indices[int(random.random() * 2)] += (int(random.random() * 8) + 1)\n\n individual.append(miller_indices)\n population.append(individual)\n\n return population", "def randomTree(maxLevels, minLeafLevels, maxChildren, weightRange, level=0, usedLabels = set()):\n numChildren = randint(1,maxChildren) if level < minLeafLevels else (randint(0,maxChildren) if level < maxLevels else 0)\n\n if numChildren > 0:\n childNodes = [randomTree(maxLevels, minLeafLevels, maxChildren, weightRange, level+1, usedLabels) for i in range(numChildren)]\n weights = [int(uniform(*weightRange)) if random() < 0.25 else round(uniform(*weightRange), randint(0,4)) for i in range(numChildren)]\n return PhyloTree_sol.PhyloTree(children = 
list(zip(childNodes, weights)))\n \n while True:\n label = \"\".join([choice(labels) for i in range(3)])\n if not label in usedLabels:\n break\n \n usedLabels.add(label)\n\n return PhyloTree_sol.PhyloTree(label=label)", "def select_parents(self, population):\n random.shuffle(population)\n return population", "def __init__(self, initial_state):\n self.initial_state = initial_state\n self.final_state = [1, 2, 3, 8, 0, 4, 7, 6, 5]\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)\n self.results = []", "def build_random_function(min_depth, max_depth):\n\n # your code goes here", "def create_init_pop(self, init_dict, init_tour, type):\n self.initial_population_size = self.w.get()\n new_pop = TSPInitialPopulation(init_dict, init_tour, self.initial_population_size,\n type) # plus the population initial size (here is 200)\n return new_pop.pop_group", "def get_random_itree(self, data_sub):\n\n # random_itree: define auxiliary function to implement recursion.\n def random_itree(x_in, current_height, lim):\n if current_height >= lim or x_in.shape[0] <= 1: # Base case check\n return It_node(l=None, r=None, split_attr=None, split_val=None, level=current_height)\n else:\n # Randomly select an attribute q.\n q = np.random.randint(x_in.shape[1])\n # Randomly select a split point p between min and max values of attribute q in X.\n p = np.random.uniform(np.min(x_in[:, q]), np.max(x_in[:, q]))\n # Get left and right subtrees.\n xl = x_in[x_in[:, q] < p, :]\n xr = x_in[x_in[:, q] >= p, :]\n # Recursive case\n return It_node(l=random_itree(xl, current_height+1, lim),\\\n r=random_itree(xr, current_height+1, lim),\\\n split_attr=q,\\\n split_val=p,\\\n level=current_height)\n\n # Build itree\n return random_itree(data_sub, current_height=0, lim=10)", "def spawn_initial_population(self, n_inputs, n_outputs):\n # See genetics.cpp:2498\n # In their code, they initialize a genome from a file and use that to\n # make the initial population.\n\n # I would prefer to start with no connections and mutate the\n # connections in as needed.\n in_nodes = [NodeGene(self.get_next_node_num(), node_type=INPUT)\n for i in range(n_inputs)]\n bias_nodes = [NodeGene(self.get_next_node_num(), node_type=BIAS)]\n out_nodes = [NodeGene(self.get_next_node_num(), node_type=OUTPUT)\n for i in range(n_outputs)]\n nodes = in_nodes + bias_nodes + out_nodes\n\n self.node_map = {n.node_id: n for n in nodes}\n self.base_nodes = [n for n in nodes]\n\n links = []\n\n # Make the first genome\n genesis_genome = Genome(self, nodes=nodes, links=links)\n\n # Add initial links\n # for in_node in in_nodes:\n # genesis_genome.add_specific_link(in_node, out_nodes[0], 0)\n\n # Make the population just this genome\n self.all_genomes = [genesis_genome]\n\n # Make the first spec\n spec_num = self.get_next_species_num()\n spec = Species(self, spec_num)\n spec.add_genome(genesis_genome)\n spec.champ = genesis_genome\n\n self.species[spec_num] = spec", "def star_topology(random, population, args):\r\n for _ in range(len(population)):\r\n yield population[:]", "def make_tree(data, indices, rng_state, leaf_size=30, angular=False):\n is_sparse = scipy.sparse.isspmatrix_csr(data)\n\n # Make a tree recursively until we get below the leaf size\n if indices.shape[0] > leaf_size:\n if is_sparse:\n inds = data.indices\n indptr = data.indptr\n spdata = data.data\n\n if angular:\n (left_indices,\n right_indices) = sparse.sparse_random_projection_cosine_split(\n inds,\n indptr,\n spdata,\n indices,\n rng_state)\n else:\n left_indices, 
right_indices = \\\n sparse.sparse_random_projection_split(\n inds,\n indptr,\n spdata,\n indices,\n rng_state)\n else:\n if angular:\n (left_indices,\n right_indices) = random_projection_cosine_split(data,\n indices,\n rng_state)\n else:\n left_indices, right_indices = random_projection_split(data,\n indices,\n rng_state)\n left_node = make_tree(data,\n left_indices,\n rng_state,\n leaf_size,\n angular)\n right_node = make_tree(data,\n right_indices,\n rng_state,\n leaf_size,\n angular)\n\n node = RandomProjectionTreeNode(indices, False, left_node, right_node)\n else:\n node = RandomProjectionTreeNode(indices, True, None, None)\n\n return node", "def initialize(self):\n N=self.N\n M=[]\n a=random.rand(self.d,1,self.D)\n M.append(a)\n for i in range(1,N-1):\n a=random.rand(self.d,self.D,self.D)\n M.append(a)\n a=random.rand(self.d,self.D,1)\n M.append(a)\n return M", "def bst_100_rand():\n from bbst import Bst\n from random import shuffle\n rando = [num for num in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n return tree", "def create_individual(self):\n self.genes = np.random.rand(self.chromosome_size)\n self.personal_best = self.genes.copy", "def make_parents(self):\r\n self.parents = []\r\n \r\n for loopindex in range(0, int(self.population_size * 0.6)):\r\n while True:\r\n if loopindex < int(self.population_size * 6 / 15):\r\n parent = random.choice(self.best_districts)\r\n else:\r\n parent = random.choice(self.worst_districts)\r\n \r\n if parent not in self.parents:\r\n self.parents.append(parent)\r\n break", "def initialize(self):\n self.tree = ROOT.TTree('tree', 'tree')\n self.simhitcount = []\n self.simhitarrays = np.array(self.simhitcount, dtype=np.int32)\n self.digitcount = []", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def __init__(self, initial, goal=None):\n \n #fill the grid with random numbers\n from random import randint as IA\n \n #in switches we must keep track of what numbers are here at the start\n #to prevent switching them.\n initialNumber = [[1 for x in range(size)] for y in range(size)]\n \n for i in range(size):\n for j in range(size):\n if(initial[i][j] == 0):\n x = IA(1,9)\n initialNumber[i][j] = 0\n while(not isLegalInBox(initial,i,j,x)):\n x = IA(1,9)\n initial[i][j] = x\n \n self.initialNumber = initialNumber\n self.initial = initial", "def initialize_population(self, params: dict):\n if params.save_example_batch:\n create_folder_if_not_exists(self.run_folder + \"/messages\")\n\n if params.single_pool:\n create_folder_if_not_exists(self.run_folder + \"/agents\")\n if params.evolution:\n create_folder_if_not_exists(self.run_folder + \"/agents_genotype\")\n else:\n create_folder_if_not_exists(self.run_folder + \"/senders\")\n create_folder_if_not_exists(self.run_folder + \"/receivers\")\n if params.evolution:\n create_folder_if_not_exists(self.run_folder + \"/senders_genotype\")\n create_folder_if_not_exists(self.run_folder + \"/receivers_genotype\")\n\n for i in range(params.population_size):\n sender_genotype = None\n receiver_genotype = None\n if params.evolution:\n sender_genotype = generate_genotype(num_nodes=params.init_nodes)\n receiver_genotype = generate_genotype(num_nodes=params.init_nodes)\n\n if params.single_pool:\n self.agents.append(\n 
SingleAgent(\n self.run_folder, params, genotype=sender_genotype, agent_id=i\n )\n )\n else:\n self.senders.append(\n SenderAgent(\n self.run_folder, params, genotype=sender_genotype, agent_id=i\n )\n )\n self.receivers.append(\n ReceiverAgent(\n self.run_folder, params, genotype=receiver_genotype, agent_id=i\n )\n )", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def initialise_smoothing_level_evolutionary_algorithm_population(self):\n\n parents = []\n parents_population=[]\n offspring = []\n population = []\n while len(parents_population) < self.__population_size:\n for i in range(0, self.__population_size):\n parent = Individual(name='parent')\n parents.append(parent)\n #print(parents)\n # generate offspring here to verify parents traits and allow most promising to produce offspring\n\n populations_genome = [i for i in self.generate_smoothing_level_genome(parents=parents,\n standard_error=self.__standard_error,\n smoothing_level=self.__smoothing_level)]\n\n populations_traits = [i for i in self.express_smoothing_level_genome(individuals_genome=populations_genome,\n standard_error=self.__standard_error,\n smoothing_level=self.__smoothing_level)]\n\n fit_population = self._population_fitness(population_xtraits=populations_traits)\n\n parents_population += fit_population\n\n #create_offspring = Population(individuals=parents_population)\n #create_offspring.reproduce()\n return parents_population", "def init_population(pop_number, variables, domains):\n population = []\n for i in range(pop_number):\n new_individual = {}\n for v in variables:\n # randomly choose an assignment from this variable's domain\n ind = np.random.choice(len(domains[v]))\n new_individual[v] = domains[v][ind]\n population.append(OrderedDict(sorted(new_individual.items())) )\n\n return population", "def create_initial_roots(self):\n\n # Trace each tree, one at a time\n initial_roots = list()\n\n for seed in self.all_seed_pixels:\n\n initial_root = rt.Root([seed], len(self.root_dict))\n self.root_dict[len(self.root_dict)] = initial_root\n\n self.all_seed_roots.add(initial_root)\n initial_roots.append(initial_root)\n\n # Iteratively create all child roots from the initial point\n root_queue = initial_roots\n while root_queue:\n for output_root in self.trace_along_children(root_queue.pop(0)):\n root_queue.append(output_root)", "def initialize_population(center: float, std: float, size: int, dimensionality: int):\n return (\n np.full(size * dimensionality, center).reshape(size, dimensionality)\n + stats.uniform.rvs(size=size * dimensionality, scale=std).reshape(size, dimensionality)\n )", "def _initial_population(draws, model, variables):\n\n population = []\n var_info = {}\n start = model.test_point\n init_rnd = pm.sample_prior_predictive(draws, model=model)\n for v in variables:\n var_info[v.name] = (start[v.name].shape, start[v.name].size)\n\n for i in range(draws):\n point = pm.Point({v.name: init_rnd[v.name][i] for v in variables}, model=model)\n population.append(model.dict_to_array(point))\n\n 
return np.array(floatX(population)), var_info", "def randomise_init_infs(nodes, infect):\n\n # First remove any current infection in nodes\n mynodes = copy.deepcopy(nodes)\n for node in mynodes:\n if node.state[State.INF_H] > 0:\n node.state[State.SUS_H] += node.state[State.INF_H]\n node.state[State.INF_H] = 0\n if node.state[State.INF_L] > 0:\n node.state[State.SUS_L] += node.state[State.INF_L]\n node.state[State.INF_L] = 0\n\n if isinstance(infect, tuple):\n nhigh = infect[0]\n nlow = infect[1]\n\n while nhigh + nlow > 0:\n node_choice = mynodes[np.random.choice(len(mynodes))]\n while node_choice.state[State.SUS_H] > 0 and nhigh > 0:\n node_choice.state[State.SUS_H] -= 1\n node_choice.state[State.INF_H] += 1\n nhigh -= 1\n while node_choice.state[State.SUS_L] > 0 and nlow > 0:\n node_choice.state[State.SUS_L] -= 1\n node_choice.state[State.INF_L] += 1\n nlow -= 1\n\n elif isinstance(infect, dict):\n regions = infect.keys()\n region_nodes = {region: [] for region in regions}\n for node in mynodes:\n region_nodes[node.region].append(node)\n\n for region in regions:\n nhigh = infect[region][0]\n nlow = infect[region][1]\n\n while nhigh + nlow > 0:\n node_choice = region_nodes[region][np.random.choice(len(region_nodes[region]))]\n while node_choice.state[State.SUS_H] > 0 and nhigh > 0:\n node_choice.state[State.SUS_H] -= 1\n node_choice.state[State.INF_H] += 1\n nhigh -= 1\n while node_choice.state[State.SUS_L] > 0 and nlow > 0:\n node_choice.state[State.SUS_L] -= 1\n node_choice.state[State.INF_L] += 1\n nlow -= 1\n\n else:\n raise TypeError(\"Wrong type for infect argument!\")\n\n return mynodes", "def initialize_persons(self) -> None:\n self.population.initialize_id(0, self.size)\n self.population.initialize_ages(self.min_age, self.max_age, self.size)\n self.population.initialize_positions(self.x_bounds, self.y_bounds,\n self.size)\n self.population.initialize_g_value(self.r, 1/self.k, self.size)\n self.population.initialize_mortality_rate(self.size,\n self.mortality_rate)\n self.population.initialize_susceptibility()\n self.population.initialize_infected_by()\n\n self.persons[:, 7] = 1\n self.persons[:, 10] = 0.1\n self.persons[:, 11] = 0.1\n\n # Update the destination each person is headed to and corresponding\n # speed randomly\n self.persons = self.movement.update_persons(self.persons, self.size,\n self.speed, 1)\n\n self.infected_person = np.random.randint(0, self.size)\n self.persons[self.infected_person, index.g_value] = 3\n self.population.set_infected_at(self.infected_person, 0)\n self.persons[self.infected_person, index.infected_by] = \\\n self.infected_person\n self.persons[self.infected_person, index.social_distance] = 0\n self.persons[self.infected_person, 9] = 1", "def generate_population(size, w, h, N):\r\n population = []\r\n for _ in range(size):\r\n entity = gen_mines(w, h, randint(0, w*h))\r\n entity = (entity[:], count_numbers(gen_board(w, h, entity), N))\r\n population.append(entity)\r\n \r\n return population", "def build_random_population(n: int)->Population:\n DEF_COO = 2\n v = [make_random_automaton(DEF_COO) for i in range(n)]\n return Population(v)", "def __init__(self, _populationSize, _chromosomeClass):\n # a generation is a collection of chromosomes stored in a priority queue\n # which is ordered by fitness\n self.generation = PriorityQueue()\n # store how many chromosomes are in each generation\n self.populationSize = _populationSize\n # store a template for generating chromosomes\n self.chromosomeClass = _chromosomeClass\n # choose a random starting 
population\n self.randomPopulation()", "def get_random_population():\r\n return [ get_random_individual() for _ in range(POPULATION_COUNT) ]", "def build_initial(domain):\n return random_candidate_float(domain)", "def default_replacement(random, population, parents, offspring, args):\r\n return population", "def generational_replacement(random, population, parents, offspring, args):\r\n num_elites = args.setdefault('num_elites', 0)\r\n population.sort(reverse=True)\r\n offspring.extend(population[:num_elites])\r\n offspring.sort(reverse=True)\r\n survivors = offspring[:len(population)]\r\n return survivors", "def generate_random_population(pop_size):\n\n random_population = []\n for agent in range(pop_size):\n random_population.append(generate_random_agent_keys())\n return random_population", "def default_selection(random, population, args):\r\n return population", "def GenerateInitialState(self, setup=\"zeros\"):\n\n if setup == \"zeros\":\n return np.zeros((self.num_neurons, 1))\n else:\n return np.random.uniform(setup[0],setup[1],size=(self.num_neurons, 1))", "def init_population(puzzle, puzzle_sum, puzzle_scratch, count):\n max_sizes = scoring.calculate_max_sizes(puzzle)\n population = np.zeros((count, len(max_sizes) + 1),\n dtype=np.uint32)\n\n # Seed population with heuristics that sort points different ways\n heuristics = make_heuristic_list()\n for k, heuristic in enumerate(heuristics):\n max_sizes.sort(key=heuristic)\n population[k, 1:] = [point for n, point in max_sizes]\n scoring.score_population(puzzle, puzzle_sum, puzzle_scratch,\n population[:len(heuristics)])\n fill_population_repeating(population, len(heuristics))\n # log.debug('best of %d heuristics is %d', n_heuristics,\n # population[:, 0].min())\n return population", "def generational_replacement(random, population, parents, offspring, args):\n num_elites = args.setdefault('num_elites', 0)\n population.sort(reverse=True)\n offspring.extend(population[:num_elites])\n offspring.sort(reverse=True)\n survivors = offspring[:len(population)]\n return survivors", "def __init__(self, initial_state: State, tree_select_policy,\n tree_expand_policy, rollout_policy, backpropagate_method,\n samples: int = 1000, exploration_const: float = 1.0,\n max_tree_depth: int = 10):\n if samples <= 0 or max_tree_depth <= 1:\n raise ValueError(\"The number of samples must be positive\")\n self._max_samples = samples\n self._exploration_const = exploration_const\n self._tree_select_policy = tree_select_policy\n self._tree_expand_policy = tree_expand_policy\n self._rollout_policy = rollout_policy\n self._back_propagate_policy = backpropagate_method\n self._root = Node(initial_state)\n self._max_tree_depth = max_tree_depth", "def __init__(self, engine, n_population=100, n_generations=150, top_individuals=20,\n top_parents=20, crossover_prob=0.9, seed=None):\n\n self.engine = engine\n self.n_population = n_population\n self.n_generations = n_generations\n self.top_individuals = top_individuals\n self.n_parents = n_population - top_individuals\n self.top_parents = top_parents\n self.crossover_prob = crossover_prob\n self.num_rotors = len(engine.rotors)\n\n # score initial conformation\n self.best_score = self.engine.score()\n self.best_conformation = self.engine.lig_dict['coords']\n if seed:\n random_seed(seed)", "def initialize(self,t0=0.0):\n \n # An connection_distribution_list (store unique connection(defined by weight,syn,prob))\n self.connection_distribution_collection = ConnectionDistributionCollection() # this is \n self.t = t0\n \n # put all 
subpopulation and all connections into the same platform\n for subpop in self.population_list:\n subpop.simulation = self\n for connpair in self.connection_list:\n connpair.simulation = self\n \n \n \n # initialize population_list, calculate \n \n \n for p in self.population_list:\n p.initialize() # 2 \n \n for c in self.connection_list:\n print 'initialize population'\n c.initialize() # 1", "def construct_random_initial(self):\n x = np.random.random((self._crv_size, self._bound))\n return x", "def init_place(self):\n for i in range(self.numCells):\n x = randint(0,self.nx)\n y = randint(0,self.ny)\n while not self.is_empty(x,y):\n x = randint(0, self.nx)\n y = randint(0, self.ny)\n assert self.put_cell(x, y, i) is True\n self.cells.append(Cell(x,y))\n\n assert self.calc_cost() is True", "def sample_tree(grid, edge_logits, edges, steps=1):\n logger.debug('sample_tree sampling a random spanning tree')\n COUNTERS.sample_tree_calls += 1\n if len(edges) <= 1:\n return edges\n tree = MutableTree(grid, edges)\n V, E, K = tree.VEK\n\n for step in range(steps):\n for e in range(E):\n e = np.random.randint(E) # Sequential scanning doesn't work.\n k1 = tree.remove_edge(e)\n valid_edges = np.where(\n tree.components[grid[1, :]] != tree.components[grid[2, :]])[0]\n valid_probs = edge_logits[valid_edges]\n valid_probs -= valid_probs.max()\n np.exp(valid_probs, out=valid_probs)\n total_prob = valid_probs.sum()\n if total_prob > 0:\n valid_probs *= 0.9999995 / total_prob # Avoid np.binom errors.\n k2 = valid_edges[sample_from_probs(valid_probs)]\n else:\n k2 = k1\n COUNTERS.sample_tree_infeasible += 1\n tree.add_edge(e, k2)\n\n COUNTERS.sample_tree_propose += 1\n COUNTERS.sample_tree_accept += (k1 != k2)\n HISTOGRAMS.sample_tree_log2_choices.update(\n [len(valid_edges).bit_length()])\n\n edges = sorted((grid[1, k], grid[2, k]) for k in tree.e2k.values())\n assert len(edges) == E\n return edges", "def generate_population(population_size, nn_architecture):\n population = []\n for _ in range(population_size):\n population.append(nn.create_nn_from_arch(nn_architecture))\n\n return population", "def initDE(N_p,lb,ub,prob):\n\n\n\n lb = np.full(N_p,lb)\n \n ub = np.full(N_p,ub)\n \n f = np.zeros((N_p,1)) #empty vector for fitness function\n \n fu = np.zeros((N_p,1))#newly created trial vector\n\n D = len(lb) # Determining amount of decision variables\n \n U = np.zeros((N_p,D)) #Matrix for storing trial solutions \n \n #Initial random population \n P = mat.repmat(lb,N_p,1)+mat.repmat((ub-lb),N_p,1)*np.random.rand(len(ub-lb),N_p)\n \n for p in np.arange(N_p):\n f[p]=prob(P[p,])\n \n return lb,ub,f,fu,D,U,P", "def init_population(self, task):\n if task.max_iters != np.inf:\n total_candidates = task.max_iters\n elif task.max_evals != np.inf:\n total_candidates = task.max_evals\n else:\n total_candidates = 0\n self.candidates = []\n x = None\n for i in range(total_candidates):\n while True:\n x = task.lower + task.range * self.random(task.dimension)\n if not np.any([np.all(a == x) for a in self.candidates]):\n self.candidates.append(x)\n break\n\n x_fit = task.eval(self.candidates[0])\n return x, x_fit, {}", "def default_replacement(random, population, parents, offspring, args):\n return population", "def build_random_trees(rows, n_features, max_depth, min_size, n_trees, random_dataset_size):\n trees = []\n for tree_number in range(n_trees):\n print(\"Building tree number:\", tree_number, \"of\", n_trees)\n # Select random dataset from original dataset\n random_dataset = select_random_rows(rows, 
random_dataset_size)\n\n # Select random features (columns)\n random_features = []\n for random_feature in range (n_features):\n # generate random index number to pick column\n random_column = randrange(len(rows))\n random_features.append(random_column)\n # generate the random tree with randomly picked features (columns) and a random dataset\n random_tree = build_single_random_tree(random_dataset, random_features, max_depth, min_size, 1)\n # add to list of trees\n trees.append(random_tree)\n return trees", "def initialize_dna(self):\n return np.random.rand(1, self.n_genes) * 2 - 1", "def empty_nests(nest, Lb, Ub, pa):\n\tn = nest.shape[0]\n\tK = np.random.random(nest.shape) > pa\n\tstepsize = np.random.rand()*(nest[np.random.permutation(n),:]-nest[np.random.permutation(n),:])\n\tnew_nest = nest + stepsize * K\n\tfor j in range(new_nest.shape[0]):\n\t\ts = new_nest[j,:]\n\t\tnew_nest[j,:] = simple_bounds(s, Lb, Ub)\n\treturn new_nest", "def test_init():\n rng = NonRandom()\n seed = 5\n rng.setSeed(seed)\n wheel = Wheel(rng)\n assert len(wheel.bins) == 38\n assert wheel.rng.value == seed\n assert wheel.rng.choice(range(0, 38)) == range(\n 0, 38)[wheel.rng.value] # == seed", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def generate_population(population_size, member_size):\n population = []\n\n for 
i in range(population_size):\n population.append(generate_individual(member_size))\n\n return population", "def __init__(self,outerPPRF):\n self.outerPPRF = outerPPRF\n\n self.g = random.randint(0,self.outerPPRF.N-1)", "def initialize(self):\n for i in range(self.number_of_persons):\n gender = 0 if np.random.random() < self.gender_ratio else 1\n age = np.random.randint(15,65)\n days = np.random.randint(0,365)\n if age >= 15 and age < 35:\n if np.random.random() < self.sexual_activity_high:\n sexual_activity = 1\n self.high_sexual_activity.add(i)\n else:\n sexual_activity = 0\n else:\n sexual_activity = 0\n \n p = Person(i,gender, age, days, sexual_activity)\n self.persons.append(p)\n if gender == 0:\n self.straight_males.add(i)\n else:\n self.females.add(i)\n self.singles.add(i)\n \n age_group = int(np.floor((age+5)/10))-2\n self.age_group[age_group].add(i)\n \n self.number_of_singles = self.number_of_persons", "def _seed_population(self):\n return [self._generate_weights() for x in range(self.population_size)]", "def __init__(self, n, prey_cnt=0, predator_cnt=0):\n # print n, prey_cnt, predator_cnt\n self.grid_size = n\n self.grid = []\n for i in range(n):\n row = [0]*n # row is a list of n zeros\n self.grid.append(row)\n self.init_animals(prey_cnt, predator_cnt)", "def WeightInitializer():\n return np.random.uniform(-1, 1)", "def get_n_random_itrees(self, n, subs_size):\n random_itrees = np.empty(n, dtype=object) # Allocate list for storing the trees.\n # TODO: parallelize!\n for k in np.arange(n):\n # Get a random sample of training examples to build next random itree.\n data_sub = self.data[np.random.choice(self.data.shape[0], subs_size, replace=False), :]\n random_itrees[k] = self.get_random_itree(data_sub) # Get next random itree \n self.random_itrees = random_itrees\n self.subs_size = subs_size", "def __init__(self, max_depth=None, criterion='gini', random_state=0):\n print(\"Initialize the model Decision Tree Classifier... 
\")\n self.random_state = random_state\n self.model = tree.DecisionTreeClassifier(max_depth=max_depth, criterion=criterion, random_state=random_state)", "def _get_initial_solution(self):\n\n if self.initial_solution_strategy == 'random':\n solution = range(1, self.number_of_cities_ + 1)\n random.seed(self.seed)\n random.shuffle(solution)\n return solution\n elif self.initial_solution_strategy == 'greedy':\n bag_of_cities = range(0, self.number_of_cities_)\n random.seed(self.seed)\n solution = [random.randint(1, self.number_of_cities_)]\n bag_of_cities.remove(solution[0] - 1)\n for i in range(1, self.number_of_cities_):\n distances = [self.distance_matrix[solution[i - 1] - 1][j] for j in bag_of_cities]\n solution.append(bag_of_cities[np.argmin(distances)] + 1)\n bag_of_cities.remove(solution[i] - 1)\n return solution\n else:\n raise AttributeError('`initial_solution_strategy` must be either `greedy` or `random`')", "def initial_population(amount):\n\n\tpopulation = []\n\tscores = []\n\n\tfor i in range(amount):\n\n\t\ttimetable_info = []\n\t\tscore_info = []\n\n\t\t# create a new random schedule\n\t\tchambers, allcourses, student_list, schedule = create_schedule()\n\n\t\t# add all information about this specific schedule\n\t\tscore_info.append(allcourses)\n\t\tscore_info.append(student_list)\n\t\tscore_info.append(chambers)\n\n\t\t# add individual schedule-info to timetable array\n\t\ttimetable_info.append(score_info)\n\t\ttimetable_info.append(schedule)\n\n\t\t# add the array with individual timetable-info to the population\n\t\tpopulation.append(timetable_info)\n\n\n\treturn population", "def init_random_state(self):\n self.current_state = self.rng.uniform(size=[1, self.num_spins])\n self.current_state = np.where(self.current_state < 0.5, -1.0, 1.0)", "def Generate_Uniform( self, Spacing=None, Variation=0.75 ):\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n tpa = 0.0\n for t in trees:\n years = self.Data.Stand[s].Tree[t].Year.keys()\n tpa += self.Data.Stand[s].Tree[t].Year[years[0]].TPA\n #print tpa\n if( Spacing==None ):\n #tpa = self.Data.Stand[s].Year[15].TPA\n rows = math.floor( math.sqrt( 43560 ) / math.sqrt( 43560 / math.ceil( tpa ) ) )\n spacing = 208.71 / rows\n else:\n spacing = Spacing\n print( tpa, spacing )\n GRID = {}\n x = 5\n y = 5\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n if( x > 208.71 ):\n x = 5\n y += spacing\n if( y > 208.71 ):\n x = 5\n y = 5\n GRID[t] = (x,y)\n x += spacing\n for t in trees:\n g = int(random.uniform( 1, tpa))\n var = random.uniform( 0, Variation)\n ang = random.uniform( 0, 360 )\n (ox,oy) = self.Compute_Offset( ang, var)\n #print ox, oy\n (x,y) = GRID[g]\n self.Data.Stand[s].Tree[t].X = x+ox\n self.Data.Stand[s].Tree[t].Y = y+oy", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def initialize(self):\n for _ in range(self.configuration.n_pop):\n individual = self.random_edge_toggle_list()\n rating = self.rate_edge_toggle_list(individual)\n self._population.append((individual, 
rating))\n\n # Set Hall of Fame individual\n self.update_hall_of_fame(self._population)", "def growPopulation(P,G):\n population = []\n for i in range(P):\n basicPerm = range(1,G)\n random.shuffle(basicPerm)\n population.append([0]+basicPerm)\n return population", "def random_init(constr=None):\n if constr is not None:\n pass\n else:\n constr = {}\n if \"PERIODS\" in constr.keys():\n periods = constr[\"PERIODS\"]\n else:\n periods = np.random.randint(2, 20)\n if \"AGENTS\" in constr.keys():\n agents = constr[\"AGENTS\"]\n else:\n agents = np.random.randint(100, 5000)\n if \"SEED\" in constr.keys():\n seed = constr[\"SEED\"]\n else:\n seed = np.random.randint(1000, 10000)\n if \"SHARE\" in constr.keys():\n share = constr[\"SHARE\"]\n else:\n share = np.random.uniform(0.1, 0.8)\n if \"FILE\" in constr.keys():\n file = constr[\"FILE\"]\n else:\n file = str(uuid.uuid4()).upper().replace(\"-\", \"\")[0:8]\n\n init_dict = {\"SIMULATION\": {}, \"PARAMS\": {}, \"DIST\": {}}\n\n init_dict[\"SIMULATION\"][\"periods\"] = periods\n init_dict[\"SIMULATION\"][\"agents\"] = agents\n init_dict[\"SIMULATION\"][\"share\"] = share\n init_dict[\"SIMULATION\"][\"seed\"] = seed\n init_dict[\"SIMULATION\"][\"file\"] = file\n\n init_dict[\"PARAMS\"][\"alpha\"] = np.random.normal(1, 0.25)\n init_dict[\"PARAMS\"][\"theta\"] = np.random.normal(0.1, 0.025)\n\n init_dict[\"DIST\"][\"beta\"] = np.random.normal(0.75, 0.1)\n init_dict[\"DIST\"][\"mu\"] = np.random.normal(0.5, 0.1)\n\n print_dict(init_dict)\n\n return init_dict" ]
[ "0.6994021", "0.6852024", "0.6667913", "0.65817934", "0.6566167", "0.6549831", "0.6475767", "0.6451392", "0.63917255", "0.63385963", "0.6332622", "0.6298666", "0.6279179", "0.6251736", "0.62178755", "0.61706847", "0.6140735", "0.60748637", "0.6073261", "0.60537773", "0.6051447", "0.60447174", "0.6036396", "0.6018691", "0.59904355", "0.5985773", "0.5975049", "0.5967593", "0.59605336", "0.5925007", "0.5883441", "0.5879441", "0.5874301", "0.58724266", "0.5868261", "0.58560705", "0.5841712", "0.583258", "0.58254695", "0.58249253", "0.58161175", "0.58019066", "0.580102", "0.5798843", "0.57927465", "0.57918227", "0.57798326", "0.576892", "0.5750354", "0.5749087", "0.57377845", "0.57361525", "0.57309306", "0.57290626", "0.57249826", "0.5719349", "0.5702068", "0.5701753", "0.56928384", "0.5680678", "0.5676249", "0.5664361", "0.56575626", "0.5654032", "0.56513107", "0.56510764", "0.5639421", "0.5635372", "0.5631054", "0.561899", "0.56061745", "0.56038564", "0.560315", "0.5602956", "0.5594208", "0.55852807", "0.5572233", "0.55669945", "0.55440235", "0.5543491", "0.5534292", "0.55256313", "0.55149233", "0.55090016", "0.55068964", "0.5506019", "0.5495922", "0.5485719", "0.5484022", "0.5478012", "0.5476932", "0.54760045", "0.54706377", "0.5466029", "0.5464954", "0.5464833", "0.5463549", "0.5461708", "0.5453787", "0.54500246" ]
0.58912426
30
2. SELECTION PHASE. If a tree does not reproduce, it becomes extinct. This calls for a competitive exclusion mechanism that eliminates the trees with lower metric values and limits the maximum number of trees in the forest. Initially, fast reproduction of trees takes place and all of them are included in the forest. The fitter trees reproduce more than the undesirable ones; here, "fitter" is either in terms of the objective or of novelty (in novelty search). The elimination mechanism is activated when the population exceeds the preselected maximum number of trees in the forest. To do so, the trees and their seeds are ranked, and those with lower fitness values are removed to keep the tree population manageable.
def select(self):

    def truncate(self):
        """ Truncates forest to maximum number of trees. """
        self.population = self.population[:self.max_number_trees]

    def SortOnItem(list_, item_loc):
        """ Sorts based on a given item. """
        templist = [elmt[item_loc] for elmt in list_]
        index = np.argsort(templist)
        return [list_[i] for i in index]

    # adds current seedlings to forest
    for tree in self.seedlings:
        # if tree does not competes with another existing one, adds it
        if tree not in self.population:
            self.population.append(tree)

    # sorts the trees of the forest in ascending values - minimization
    self.population = SortOnItem(self.population, item_loc=0)

    # removes unfit trees from forest
    truncate(self)
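Stripped of the class plumbing, the selection step above merges the seedlings into the forest, ranks all (fitness, tree) pairs in ascending order of fitness (minimisation) and keeps only the best max_number_trees. A standalone toy run of the same mechanics, with fabricated fitness values and string stand-ins for the trees, would behave like this:

# Toy values below are fabricated for illustration only; in the algorithm
# they come from the current population and the seedlings buffer.
import numpy as np

max_number_trees = 3
population = [(4.2, "tree-a"), (0.7, "tree-b"), (2.9, "tree-c")]
seedlings = [(0.1, "seedling-x"), (3.5, "seedling-y")]

# merge seedlings that do not duplicate an existing tree
for tree in seedlings:
    if tree not in population:
        population.append(tree)

# rank on fitness (ascending, i.e. minimisation) and truncate to the cap
order = np.argsort([entry[0] for entry in population])
population = [population[i] for i in order][:max_number_trees]

print(population)  # [(0.1, 'seedling-x'), (0.7, 'tree-b'), (2.9, 'tree-c')]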
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prune( tree, impurity_crit, dataSet, treeSeq ):\n\n\t\tsaved = {}\n\n\t\ttotal_leaf_impurity, num_leaves = DecisionTree._fetch(tree, impurity_crit, dataSet, saved)\n\n\t\tnodes, sets, G = saved['node'], saved['set'], saved['G']\n\n\t\t# choose TreeNode such that g is minimum to prune\n\t\tmin_g_ind = np.argmin(G)\n\t\tnode2Prune = nodes[min_g_ind]\n\t\tnode2Prune.value = DecisionTree._make_leaf(sets[min_g_ind], impurity_crit)\n\t\tnode2Prune.cut_off = None\n\n\t\t# get a new tree pruned\n\t\ttreeSeq['alpha'].append(G[min_g_ind])\n\t\ttreeSeq['tree'].append(tree)\n\t\ttreeSeq['num_leaves'].append(num_leaves-node2Prune.leaves()+1)\n\n\t\tif not (tree.left.cut_off is None and tree.right.cut_off is None):\n\n\t\t\tDecisionTree._prune(deepcopy(tree), impurity_crit, dataSet, treeSeq )\n\t\telse:\n\t\t\treturn", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores 
lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def test_rand_100_depth_remains_less_than_8():\n from bbst import Bst\n from random import shuffle\n max_depth = 0\n for x in range(10):\n rando = [x for x in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n tree_depth = tree.depth()\n if tree_depth > max_depth:\n max_depth = tree_depth\n assert max_depth == 8", "def extra_trees_test(n_jobs=1):\n # model = models.RandomForest.ExtraTreesModel()\n # model.run('cv')\n\n # tune the model - 15 trees already gives .13 RMSE, I think that's slightly better than RF with that number of trees\n params = {\n 'n_estimators': [15, 50, 100, 250]\n }\n model = models.RandomForest.ExtraTreesModel(\n grid_search_parameters=params,\n grid_search_sample=0.5,\n n_jobs=n_jobs\n )\n model.run('grid_search', refit=True)\n # 2014-01-21 05:45:28 - Base - INFO - Found best parameters:\n # 2014-01-21 05:45:28 - Base - INFO - {'n_estimators': 250}\n # 2014-01-21 05:45:28 - Base - INFO - Predicting on holdout set\n # 2014-01-21 05:45:41 - classes - INFO - RMSE: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - RMSE on holdout set: 0.124530683233\n # 2014-01-21 05:45:41 - Base - INFO - Grid search completed in 8916.21896791\n # 2014-01-21 05:45:41 - Base - INFO - Model completed in 9332.45440102\n\n # As expected, more trees = better performance. Seems like the performance is on par/slightly better than random forest", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. 
\"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def improve_tree(tree, freq_dict):\n # todo", "def fit_tree_stump_forest(X_train: np.ndarray, y_train: np.ndarray, n_estimators: int) -> RandomForestClassifier:\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf = clf.fit(X_train, y_train)\n return clf", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n 
gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def MaxParsimonyNoTable(X, Tree, tip_row_dict, do_tips=False, naming=False):\n # 2 represents {0,1} set\n sp_to_arr = lambda sp_arr: np.array(sp_arr.todense().astype(np.int8))[0]\n wrap = lambda x: sp_to_arr(X[tip_row_dict[x.name]]) if x.is_leaf() and not do_tips else sp_to_arr(x.genotype)\n tree_len = 0\n for _ in Tree.traverse(): tree_len += 1\n for i, node in tqdm.tqdm(enumerate(Tree.traverse('postorder')), total=tree_len,\n desc='Ancestral Reconstruction: 1st pass'):\n if node.is_leaf():\n if not do_tips:\n node.genotype = X[tip_row_dict[node.name]]\n continue\n if naming: node.name = i\n children = [wrap(c) for c in node.children]\n res = children[0].copy()\n eq = np.equal(*children)\n res[children[0] == 2] = children[1][children[0] == 2] # 2 is the union {0,1}\n res[children[1] == 2] = children[0][children[1] == 2]\n res[(children[0] != 2) & (children[1] != 2) & ~eq] = 2\n node.genotype = sp.csr_matrix(res)\n\n post = Tree.traverse('preorder')\n root = next(post)\n root.random = (wrap(root) == 2)\n root.genotype[root.genotype == 2] = np.random.choice([1, 0], size=(root.genotype == 2).sum())\n for node in tqdm.tqdm(post, total=tree_len - 1, desc='Ancestral Reconstruction: 2nd pass'):\n if node.is_leaf(): continue\n parent_ = wrap(node.up)\n node_ = wrap(node)\n res = node_.copy()\n res[node_ == 2] = parent_[node_ == 2]\n node.random = (node.up.random) & (node_ == 2) # these are unstable positions - will not be counted\n node.genotype = sp.csr_matrix(res)\n\n return Tree", "def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, \n max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,\n verbose=0, min_density=None, compute_importances=None): \n self.random_forest = RandomForestClassifier(n_trees, criterion, max_depth, 
min_samples_split, min_samples_leaf, \n max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,\n verbose, min_density, compute_importances)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def test_with_data(data):\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n unpruned = induce_node_tree(training_set, original_issues, \"D\", -1)\r\n pruned = prune_tree(unpruned, tuning_set)\r\n\r\n return pruned", "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def decision(grid):\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()", "def recursive_feature_elimination(self):\n\t\tsvc = SVC(kernel=\"linear\")\n\t\tself.model = Pipeline([\n\t\t\t('feature_selection', RFE(estimator=svc, n_features_to_select=8, step=10)),\n\t\t\t('classification', self.model)\n\t\t\t])", "def __init__(self,num_trees=100, depth_limit=5, example_subsample_rate=0.4,\n attr_subsample_rate=0.4):\n\n # TODO: finish this.\n self.num_trees = num_trees\n self.depth_limit = depth_limit\n self.example_subsample_rate = example_subsample_rate\n self.attr_subsample_rate = attr_subsample_rate\n self.classifier = RandomForest(self.num_trees, self.depth_limit, self.example_subsample_rate,\n self.attr_subsample_rate)", "def guessTreeOpt(train, test, valid):\n best = findApproxDepth(train, valid, 5, 5)\n tree = DecisionTree(train)\n print(\"building tree from full set\")\n tree.buildTree(best[0], best[1], True)\n print(\"tree built, testing tree\")\n acc = testTreeF(tree, test)\n print(\"accuracy of:\", \"%.2f\" % (acc * 100))\n return tree", "def __init__(self, max_depth=None, 
criterion='gini', random_state=0):\n print(\"Initialize the model Decision Tree Classifier... \")\n self.random_state = random_state\n self.model = tree.DecisionTreeClassifier(max_depth=max_depth, criterion=criterion, random_state=random_state)", "def step_tree(self):\n if random.random() < self.world.f or self.any_neighbor_burning():\n self.set_state(\"orange\")", "def _next_to_prune(tree, children=None):\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def forest_model(params):\n if (params['random']):\n params['n_estimators'] = random.choice([1, 3, 5, 10, 20, 30, 40, 50, 75, 100])\n model = ExtraTreesClassifier(\n n_estimators=params['n_estimators'],\n random_state=0\n )\n\n return model", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left 
size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n criterion=best_tree_parameters['criterion'],\n random_state=1)", "def __init__(self, n_estimators=10, max_features=None, min_samples_split=10, max_depth=None, criterion=None):\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.max_features = max_features\n self.n_estimators = n_estimators\n self.trees = []", "def tree_optimize(mvp_tree,coefs=None):\n if not coefs:\n coefs = [1,1,1]\n # TODO", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def __init__(self, n_estimators=100, n_jobs=100,\n n_bins=2, min_leaf=1, max_depth=2, n_samples=1.0, max_features=\"auto\", \n bootstrap=True, random_state=42, criterion=\"auc_sub\", bias_method=\"avg\", \n compound_bias_method=\"avg\", orthogonality=.5):\n self.is_fit = False\n self.n_bins = n_bins\n self.n_jobs = n_jobs\n self.min_leaf = min_leaf\n self.max_depth = max_depth\n self.n_samples = n_samples\n self.criterion = criterion\n self.max_features = max_features\n self.bias_method = bias_method\n self.orthogonality = orthogonality\n self.bootstrap = bootstrap\n self.random_state = random_state \n self.n_estimators = n_estimators\n self.compound_bias_method = compound_bias_method\n \n\n # Generating FairRandomForest\n dts = [\n FairDecisionTreeClassifier(\n n_bins=self.n_bins,\n min_leaf=self.min_leaf,\n max_depth=self.max_depth,\n n_samples=self.n_samples,\n criterion=self.criterion,\n random_state=self.random_state+i,\n max_features=self.max_features,\n bias_method=self.bias_method,\n orthogonality=self.orthogonality,\n bootstrap=self.bootstrap,\n compound_bias_method=self.compound_bias_method,\n )\n for i in range(self.n_estimators)\n ]\n self.trees = dts", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += 
self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def truncate(self):\n\n self.population = self.population[:self.max_number_trees]", "def finetune_depth():\n start_depth = 3\n tol = 10E-4\n best_depth = start_depth\n acc = [-1]\n for i in tqdm(range(20),desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n XGBCla = get_XGBmodel(depth=i+start_depth)\n XGBCla.fit(X_train, y_train)\n pred = XGBCla.predict(X_test)\n acc.append(accuracy_score(y_test, pred))\n if (abs(acc[i]-acc[i+1])<tol):\n break\n if (acc[i]<acc[i+1]):\n best_depth = start_depth + i\n print(\"Accuracy: %.4f\" % acc[-1])\n print(\"Best depth: %d\" % best_depth)", "def fit(self, X:np.ndarray, e=0, improved=False):\n if e>=self.height_limit or len(X)<=1:\n self.n_nodes = self.n_nodes + 1\n return Tree(X,None,None,None,None,'ex')\n else:\n Q = np.arange(X.shape[1], dtype='int')\n q = np.random.choice(Q)\n q_min = X[:,q].min()\n q_max = X[:,q].max()\n if improved:\n p_list = np.random.uniform(q_min,q_max,5)\n best_p = q_max\n x_len = len(X)\n for p in p_list:\n X_left = X[np.where(X[:,q] < p)]\n X_right = X[np.where(X[:,q] >= p)]\n if min(len(X_left), len(X_right))<=5:\n best_p = p\n break\n if min(len(X_left), len(X_right))<x_len:\n best_p = p\n else:\n best_p = np.random.uniform(q_min,q_max)\n X_left = X[np.where(X[:,q] < best_p)]\n X_right = X[np.where(X[:,q] >= best_p)]\n self.n_nodes = self.n_nodes + 1\n self.root = Tree(None,q, best_p, self.fit(X_left,e+1), self.fit(X_right,e+1), 'in')\n return self.root", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n 
graph.render(\"output\")", "def rf_tuning(n_estimators, min_samples_leaf, min_samples_split, max_leaf_nodes, max_features, max_depth, train_x,\n test_x, train_y, test_y):\n rf_tuning = RandomForestClassifier(n_estimators=n_estimators, min_samples_leaf=min_samples_leaf,\n min_samples_split=min_samples_split, max_leaf_nodes=max_leaf_nodes,\n max_features=max_features, max_depth=max_depth)\n rf_tuning.fit(train_x, train_y)\n predictions = rf_tuning.predict(test_x)\n recall = recall_score(test_y, predictions, average=\"macro\")\n return recall", "def selection(self):\n\n # Sort root Teams from best to worst\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n\n # Save trainer and top agent so far\n self.save()\n if self.AGENT_NAME == \"\":\n ranked_agents[0].save(Trainer.ENV_NAME)\n else:\n ranked_agents[0].save(Trainer.AGENT_NAME)\n\n # Sanity check: There should always be R_SIZE root Teams at this point\n if len(self.agent_pop) != Trainer.R_SIZE:\n print(\"WARNING - Trainer::selection - len(self.agent_pop) != Trainer.R_SIZE\")\n print(\" len(self.agent_pop) = \", len(self.agent_pop))\n\n # Calculate the number of root Teams to retain\n num_keep = int(Trainer.PERCENT_KEEP * Trainer.R_SIZE)\n\n # Grab slice of sorted root Team references to delete\n agents_to_delete = ranked_agents[num_keep:]\n\n # Clean all root Teams in the Teams-to-delete list\n # Note: Still need to clean the population of Learners which\n # may now contain orphans and update the root Team population.\n for agent in agents_to_delete:\n\n team = agent.team\n\n # Safety and sanity check: These should ALL be root Teams\n if team.getNumReferencingLearners() != 0:\n print(\"WARNING - Trainer::selection - A non-root Team is being deleted!\")\n\n team.removeLearners()\n self.team_pop.remove(team)\n\n # Clean up orphanced Learners after Team removal\n self.cleanOrphanedLearners()", "def random_forest_grid(features, df, param_dict): \n X= features\n y = df['Severity'].values\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator = rf, param_distributions = param_dict, n_iter = 70, cv = 5, scoring='f1',\n verbose=2, random_state=42, n_jobs = -1)\n result = rf_random.fit(X, y)\n return result.best_score_, result.best_params_", "def selection(self):\n\n for i in range(self.pop_num*3): # It is important. 
Next, we will rank the array of parents and children in ascending order of survivability (sum (fit)).\n self.par_and_sons[i].fit = SimpleSegmentationGA.fitness_function(self.gray, self.delta_x, self.length, self.par_and_sons[i].A)\n\n # Sort.\n self.par_and_sons = sorted(self.par_and_sons, key=lambda individ: individ.fit) \n self.population=self.par_and_sons[:self.pop_num].copy()", "def __init__(self, n_children='4-4', **kwargs):\n super(HspringsV1TreeRandomDrop, self).__init__(\n n_children, **kwargs)", "def test_remove_all_values5(delete_tree):\n delete_tree.remove(\"teabaggers\")\n delete_tree.remove(\"teabagged\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"ted\")\n delete_tree.remove(\"tea\")", "def search(state, cut_value, game, prune=True):\n print (game.step)\n f = open(\"search_tree_\" + str(game.step) + \".txt\", 'w')\n\n def max_value(state, alpha, beta, depth, node):\n start = time.time()\n if game.cutoff(state, depth):\n return game.evaluate(state), None\n val = -inf\n action = None\n pre_val = game.evaluate(state)\n print (\"pre \" + str(pre_val))\n for a, s in game.successors(state):\n #print (str(a))\n cur_val = game.evaluate(s)\n #print (str(a) + ':' + str(cur_val))\n node_child = Search_node(node, a, cur_val)\n node.add_child(node_child)\n if cur_val > pre_val + cut_value:\n v, _ = min_value(s, alpha, beta, depth + 1, node_child)\n f.write(\"a: \" + str(a) + \"; v: \" + str(v) + \"; depth:\" + \\\n str(depth) + \"; alpha:\" + str(alpha) + \"; beta:\" + str(beta) \\\n + \" \\n\")\n else:\n v = cur_val\n if v > val:\n val = v\n action = a\n if prune:\n if v >= beta:\n return v, a\n alpha = max(alpha, v)\n end = time.time()\n print(\"max t:\" + str(end - start))\n return val, action\n\n def min_value(state, alpha, beta, depth, node):\n if game.cutoff(state, depth):\n return game.evaluate(state), None\n val = inf\n action = None\n pre_val = game.evaluate(state)\n print (\"min pre \" + str(pre_val))\n for a, s in game.successors(state):\n cur_val = game.evaluate(s)\n node_child = Search_node(node, a, cur_val)\n node.add_child(node_child)\n if cur_val < pre_val - cut_value:\n v, _ = max_value(s, alpha, beta, depth + 1, node_child)\n # f.write(\"a: \" + str(a) + \"; v: \" + str(v) + \"; depth:\" + \\\n # str(depth) + \"; alpha:\" + str(alpha) + \"; beta:\" + str(beta) + \" \\n\")\n else:\n v = cur_val\n if v < val:\n val = v\n action = a\n if prune:\n if v <= alpha:\n return v, a\n beta = min(beta, v)\n return val, action\n\n root_node = Search_node(None, None, 0)\n\n _, action = max_value(state, -inf, inf, 0, root_node)\n root_node.print_tree()\n f.close()\n return action", "def initialisation(Rsize, config, n_global_in, n_global_out, ke):\n # Creating population of Rsize*Rsize new random individuals\n # population = [[Individual(config, n_global_in, n_global_out)]*Rsize for _ in range(Rsize)]\n reef = [Individual(config, n_global_in, n_global_out) for _ in range(Rsize * Rsize)]\n print \"Reef created with \" + str(len(reef)) + \" solutions\"\n print \"Original size: \" + str(len(reef))\n\n # Eval population\n\n reef, count_evaluations = eval_population(reef, ke)\n # for ind in reef:\n # print str(ind.fitness)\n\n # Calculating fitness mean and std deviation\n fitness = fitness_mean_std(reef)\n\n fitness_mean_validation = fitness[\"validation\"][\"mean\"]\n fitness_std_validation = fitness[\"validation\"][\"std\"]\n fitness_max_validation = fitness[\"validation\"][\"max\"]\n fitness_min_validation 
= fitness[\"validation\"][\"min\"]\n\n # Deleting corals according to formula\n # It is not the same that the depredation one\n # new_population = [[ind if initial_deletion_check(ind.fitness, fitness_mean, fitness_std) else None for ind in line ] for line in population]\n new_reef = [\n ind if initial_deletion_check(ind.fitness[\"accuracy_validation\"], fitness_mean_validation, fitness_std_validation) else None for\n ind in reef]\n\n print \"Population reduced to: \" + str(len(filter(lambda w: w is not None, new_reef))) + \" solutions\"\n\n # for ind in filter(lambda w: w is not None, new_reef):\n # print str(ind.fitness)\n\n return new_reef", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def pruning_order(self, max_to_prune=None):\n\n def _get_terminal_nodes(children):\n \"\"\"Lists the nodes that only have leaves as children\"\"\"\n leaves = np.where(children[:,0]==_tree.TREE_LEAF)[0]\n child_is_leaf = np.in1d(children, leaves).reshape(children.shape)\n return np.where(np.all(child_is_leaf, axis=1))[0]\n\n def _next_to_prune(tree, children=None):\n \"\"\"Weakest link pruning for the subtree defined by children\"\"\"\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]\n\n if max_to_prune is None:\n max_to_prune = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n\n children = np.array([self.children_left.copy(), self.children_right.copy()]).T\n nodes = list()\n\n while True:\n node = _next_to_prune(self, children)\n nodes.append(node)\n\n if (len(nodes) == max_to_prune) or (node == 0):\n return np.array(nodes)\n\n #Remove the subtree from the children array\n children[children[node], :] = _tree.TREE_UNDEFINED\n children[node, :] = _tree.TREE_LEAF", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def prune(self, x_val, y_val):\n\n # make sure that the classifier has been trained 
before predicting\n if not self.is_trained:\n raise Exception(\"DecisionTreeClassifier has not yet been trained.\")\n\n # get the maximum depth\n deepest_depth = get_max_depth(self.root)\n\n # explore the depth starting from (max_depth - 1) to half of the max_depth\n half_of_max_depth = deepest_depth // 2\n for depth in range(deepest_depth - 1, half_of_max_depth, -1):\n explore_nodes_to_prune(self, self.root, x_val, y_val, depth)\n\n print(\"Pruning completed\")", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def prune(tree, minGain, evaluationFunction=entropy, notify=False):\n # recursive call for each branch\n if tree.trueBranch.results == None: prune(tree.trueBranch, minGain, evaluationFunction, notify)\n if tree.falseBranch.results == None: prune(tree.falseBranch, minGain, evaluationFunction, notify)\n\n # merge leaves (potentionally)\n if tree.trueBranch.results != None and tree.falseBranch.results != None:\n tb, fb = [], []\n\n for v, c in tree.trueBranch.results.items(): tb += [[v]] * c\n for v, c in tree.falseBranch.results.items(): fb += [[v]] * c\n\n p = float(len(tb)) / len(tb + fb)\n delta = evaluationFunction(tb+fb) - p*evaluationFunction(tb) - (1-p)*evaluationFunction(fb)\n if delta < minGain:\n if notify: print('A branch was pruned: gain = %f' % delta)\n tree.trueBranch, tree.falseBranch = None, None\n tree.results = uniqueCounts(tb + fb)", "def build_random_function(min_depth, max_depth):\n\n # your code goes here", "def prune(self, rng, get_nodes, max_depth=1):\n if not self.children:\n return\n for i_c, child in enumerate(self.children):\n if child.min_depth >= max_depth:\n self.children[i_c] = Node(\n rng.choice(get_nodes(arity=0)),\n self.tree_type)\n self.children[i_c].parent = self\n elif max_depth > 1:\n child.prune(rng, get_nodes, max_depth - 1)", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n growForest(config)", "def BiasedTree(N,alpha=0.): \n free = sample(range(N),N)\n nodes = [free.pop()]\n links = []\n K = np.zeros((N,))\n K[nodes[0]]=1.\n while free:\n newn = free.pop()\n K[newn]=1.\n p = 
K[np.array(nodes)]**alpha\n p = p/np.sum(p)\n mother = np.random.choice(nodes,p=p)\n K[mother] += 1.\n nodes.append(newn)\n links.append((mother,newn))\n return nx.DiGraph(links)", "def concrete_search(self, limit):\n frontier = Frontier_SortedList.Frontier_SortedList()\n closed = {}\n initial_node = TreeNode.TreeNode(\n id=0,\n state=self.problem.initial_state,\n cost=0,\n node_depth=0,\n f=None,\n parent=None,\n last_action=None,\n )\n initial_node.f = self.__f_strategy(initial_node)\n id = 1\n frontier.insert(initial_node)\n solution = False\n while not solution and not frontier.is_empty():\n actual_node = frontier.remove()\n pruned = False\n if self.problem.is_goal(actual_node.state):\n solution = True\n else:\n if self.pruning == 1:\n pruned = self.check_node_pruning_1st_prune(actual_node, closed)\n if not pruned:\n closed[actual_node.state.create_md5()] = abs(actual_node.f)\n\n if self.pruning in [0, 1]:\n if not pruned:\n if actual_node.node_depth < limit:\n frontier, id = self.expand_node(id, actual_node, frontier)\n\n if self.pruning == 2:\n if actual_node.node_depth < limit:\n list_nodes, id = self.expand_node_2nd_prune(id, actual_node)\n for node in list_nodes:\n md5 = node.state.create_md5()\n if md5 not in closed or closed[md5] > abs(node.f):\n closed[md5] = abs(node.f)\n frontier.insert(node)\n if solution:\n return self.solution(actual_node)\n else:\n return None", "def test_random_partitions_kernel_unbalanced(unbalanced_data):\n X, y = unbalanced_data\n\n # extra trees chooses a different variable to split, so just use RF\n forest = RandomForestClassifierKernel(\n n_estimators=3,\n kernel_type='random_partitions',\n sampling_method='supervised',\n random_state=123)\n K = forest.fit_transform(X, y)\n\n K_expected = np.array([[ 1. , 1. , 1. ,\n 0.33333333, 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 0.33333333],\n [ 1. , 1. , 1. ,\n 0.33333333, 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 0.33333333],\n [ 1. , 1. , 1. ,\n 0.33333333, 0.33333333, 0.33333333,\n 0.33333333, 0.33333333, 0.33333333],\n [ 0.33333333, 0.33333333, 0.33333333,\n 1. , 1. , 0.66666667,\n 1. , 0.66666667, 1. ],\n [ 0.33333333, 0.33333333, 0.33333333,\n 1. , 1. , 0.66666667,\n 1. , 0.66666667, 1. ],\n [ 0.33333333, 0.33333333, 0.33333333,\n 0.66666667, 0.66666667, 1. ,\n 0.66666667, 1. , 0.66666667],\n [ 0.33333333, 0.33333333, 0.33333333,\n 1. , 1. , 0.66666667,\n 1. , 0.66666667, 1. ],\n [ 0.33333333, 0.33333333, 0.33333333,\n 0.66666667, 0.66666667, 1. ,\n 0.66666667, 1. , 0.66666667],\n [ 0.33333333, 0.33333333, 0.33333333,\n 1. , 1. , 0.66666667,\n 1. , 0.66666667, 1. 
]])\n np.testing.assert_allclose(K, K_expected)", "def build_random_trees(rows, n_features, max_depth, min_size, n_trees, random_dataset_size):\n trees = []\n for tree_number in range(n_trees):\n print(\"Building tree number:\", tree_number, \"of\", n_trees)\n # Select random dataset from original dataset\n random_dataset = select_random_rows(rows, random_dataset_size)\n\n # Select random features (columns)\n random_features = []\n for random_feature in range (n_features):\n # generate random index number to pick column\n random_column = randrange(len(rows))\n random_features.append(random_column)\n # generate the random tree with randomly picked features (columns) and a random dataset\n random_tree = build_single_random_tree(random_dataset, random_features, max_depth, min_size, 1)\n # add to list of trees\n trees.append(random_tree)\n return trees", "def build_most_significant_tree(ktree, outputdir):\n stree = Tree()\n root = max(ktree.parents.keys())\n for cpt, popval in ktree.population.items():\n if popval == 1:\n stree.population[cpt] = 1\n _iter_build_most_significant_tree(ktree, stree, root)\n with open(os.path.join(outputdir, \"most_significant_tree.p\"), \"wb\") as f:\n pickle.dump(stree, f)\n return stree", "def fit(self, dataSet, prune=False, validSet=None):\n\t\t\n\t\tmodel_args = self._model_complexity_args.copy()\n\t\tif prune:\n\t\t\tif type(validSet).__name__ != 'ndarray':\n\t\t\t\traise AttributeError(\"To make pruning, validation set accept 'ndarray'\\\n\t\t\t\t\t, cannot be {}!\".format(type(validSet).__name__))\n\t\t\t# get a fully-grown tree\n\t\t\tmodel_args['min_impurity_decrease'] = 0\n\t\t\tmodel_args['min_samples_split'] = 2\n\t\t\n\t\tif self._treeType == 'reg':\n\t\t\timpurity_crit = DecisionTree._MSE\n\t\telif self._treeType == 'clf':\n\t\t\timpurity_crit = DecisionTree._Gini\n\n\n\t\telse:\n\t\t\traise ValueError(\"Argument 'treeType' accept 'clf' or 'reg' only\")\n\t\tself._root = DecisionTree._createTree(dataSet, impurity_crit=impurity_crit,\n\t\t\t\t\t\t\t\t\t\t\t**model_args)\n\n\t\tprint(\"Decision Tree Generated!\")\n\n\t\tif prune:\n\t\t\tprint(\"Pruning...\")\n\t\t\ttreeSeq = {'tree':[self._root], 'alpha':[0], 'num_leaves': [self._root.leaves()]} \n\t\t\tpruned_tree = DecisionTree._prune(deepcopy(self._root), impurity_crit, dataSet, treeSeq)\n\t\t\tprint('Pruning Done: %d pruned sub tree got' % len(treeSeq['tree']))\n\t\t\tprint('choosing best subtree through validation set...')\n\t\t\tbestSubtree, error_score = DecisionTree._bestSubtree(treeSeq, impurity_crit, validSet)\n\t\t\tprint('best subtree selected with error score: {}'.format(error_score))\n\n\t\t\tself._root = bestSubtree", "def evaluate_cuts(base_tree, node):\n config = Configuration.config # Collect configuration\n\n N = config.normals # Collect predefined set of normal vectors\n N = np.append(N, node.auxiliary_normals, axis=0) # Append partition's bounding-box-aligned vectors as normals\n N = np.unique(np.round(N, 3), axis=0) # Return sorted unique elements of input array_like\n\n trees = []\n for i in range(N.shape[0]):\n trees_of_this_normal = [] # start a list of trees for splits along this normal\n normal = N[i] # current normal\n for plane in bsp_tree.get_planes(node.part, normal): # iterate over all valid cutting planes for the node\n tree, result = bsp_tree.expand_node(base_tree, node.path, plane) # split the node using the plane\n if tree: # only keep the tree if the split is successful\n trees_of_this_normal.append(tree)\n logger.debug(f\"normal index: {i}, origin: {plane[0]}, 
normal: {plane[1]}, result: {result}\")\n if len(trees_of_this_normal) == 0: # avoid empty list errors during objective function evaluation\n logger.info(f\"normal index: {i}, trees for normal: {len(trees_of_this_normal)}, total trees: {len(trees)}\")\n continue\n # go through each objective function, evaluate the objective function for each tree in this normal's\n # list, fill in the data in each tree object in the list\n for evaluate_objective_func in objectives.values():\n evaluate_objective_func(trees_of_this_normal, node.path)\n trees += trees_of_this_normal\n logger.info(f\"normal index: {i}, trees for normal: {len(trees_of_this_normal)}, total trees: {len(trees)}\")\n\n # go through the list of trees, best ones first, and throw away any that are too similar to another tree already\n # in the result list\n result_set = []\n for tree in sorted(trees, key=lambda x: x.objective):\n if tree.sufficiently_different(node, result_set):\n result_set.append(tree)\n logger.info(f\"{len(result_set)} valid trees\")\n return result_set", "def __init__(self, n_estimators=100, max_depth=2**31-1, learning_rate=0.1, min_samples_split=2,\n min_samples_leaf=1, subsample=1.0, colsample_bytree=1.0, max_bin=225, random_state=None):\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.learning_rate = learning_rate\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.subsample = subsample\n self.colsample_bytree = colsample_bytree\n self.max_bin = max_bin\n self.random_state = random_state\n self.f_0 = None\n self.trees = dict()", "def greedy_build(nodes, priors=None, cutoff=200, considered=set(), uniq='', targets=[]):\n\n\t# Tracks frequency of states for each character in nodes\n\tcharacter_mutation_mapping = defaultdict(int)\n\n\t# G models the network that is returned recursively\n\tG = nx.DiGraph()\n\n\troot = root_finder(nodes)\n\n\t# Base case check for recursion, returns a graph with one node corresponding to the root of the remaining nodes\n\tif len(nodes) <= cutoff or len(nodes) == 1:\n\t\troot = root_finder(nodes)\n\t\tG.add_node(root)\n\t\treturn G, [[root, nodes]]\n\n\t# Accounting for frequency of mutated states per character, in order to choose the best split\n\tfor node in nodes:\n\t\tnode_list = node.split(\"_\")[0].split('|')\n\t\tfor i in range(0, len(node_list)):\n\t\t\tchar = node_list[i]\n\t\t\tif char != '0' and char != '-':\n\t\t\t\tcharacter_mutation_mapping[(str(i), char)] += 1\n #if char != '0':\n # if char == \"-\":\n # character_mutation_mapping[(str(i), char)] -= 1\n # else:\n # character_mutation_mapping[(str(i), char)] += 1\n\n\t# Choosing the best mutation to split on (ie character and state)\n\tcharacter, state = 0, 0\n\tmax_cost = 0\n\n\tmin_prior = 1\n\tif priors:\n\t\tfor i in priors.keys():\n\t\t\tfor j in priors[i].keys():\n\t\t\t\tmin_prior = min(min_prior, priors[i][j])\n\n\tfor i,j in character_mutation_mapping:\n\t\tif not (i,j) in considered:\n\t\t\tif not priors:\n\t\t\t\tif max_cost < character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\t\t\telse:\n\t\t\t\tif j not in priors[int(i)]:\n\t\t\t\t\tpriors[int(i)][j] = min_prior\n\t\t\t\tif max_cost < -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\tcharacter = int(character)\n\n\n\t# If there is no good split left, stop the process and return a 
graph with the remainder of nodes\n\tif character == 0 and state == 0:\n\t\tif len(nodes) == 1:\n\t\t\tG.add_node(nodes[0])\n\t\telse:\n\t\t\tfor i in range(0, len(nodes)):\n\t\t\t\tif nodes[i] != root:\n\t\t\t\t\tG.add_edge(root, nodes[i])\n\t\treturn G, []\n\n\t# Splitting nodes based on whether they have the mutation, don't have the mutation, or are NA('-') in that character\n\t# Right split is where nodes with the mutation go, everyone else goes to left split or NA chars\n\tleft_split, right_split, NA_chars = [], [], []\n\tright_split_temp = []\n\tleft_split_temp = []\n\tfor node in nodes:\n\t\tnode_list = node.split('|')\n\t\tif node_list[character] == state:\n\t\t\tright_split.append(node)\n\t\telif node_list[character] == '-':\n\t\t\tNA_chars.append(node)\n\t\telse:\n\t\t\tleft_split.append(node)\n\n\n\t# Seperates all nodes with NA in the character chosen to be split upon\n\t# Puts in right split or left split based on which list shares more mutated characters with this string\n\tfor node in NA_chars:\n\t\tright_split_score = 0\n\t\tleft_split_score = 0\n\t\tnode_list = node.split('|')\n\t\tnum_not_missing = len([n for n in node_list if n != \"-\"])\n\t\tfor i in range(0, len(node_list)):\n\t\t\tif node_list[i] != '0' and node_list[i] != '-':\n\t\t\t\tfor node_2 in left_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tleft_split_score += 1\n\t\t\t\tfor node_2 in right_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tright_split_score += 1\n\n\t\tavg_left_split_score = left_split_score / float(len(left_split) * num_not_missing + 1)\n\t\tavg_right_split_score = right_split_score / float(len(right_split) * num_not_missing + 1)\n\n\t\tif avg_left_split_score < avg_right_split_score:\n\t\t\tright_split_temp.append(node)\n\t\telse:\n\t\t\tleft_split_temp.append(node)\n\n\tright_split += right_split_temp\n\tleft_split += left_split_temp\n\n\t# Add character, state that split occurred to already considered mutations\n\tconsidered.add((str(character), state))\n\tG = nx.DiGraph()\n\t#splitter = str(character) + \" \" + str(state) + \" (\" + uniq + \")\"\n\tsplitter = root\n\n\t# Recursively build left side of network (ie side that did not mutation at the character with the specific state)\n\tG.add_node(splitter)\n\tleft_subproblems = []\n\tleft_network = None\n\tif len(left_split) != 0:\n\t\tleft_root = root_finder(left_split)\n\t\t# if left_root not in left_split and left_root in targets:\n\t\t# \tleft_root = left_root + \"_unique\"\n\n\t\tleft_network, left_subproblems = greedy_build(left_split, priors, cutoff, considered.copy(), uniq + \"0\", targets=targets)\n\n\t\tleft_nodes = [node for node in left_network.nodes() if left_network.in_degree(node) == 0]\n\t\tdup_dict = {}\n\t\tfor n in left_network:\n\t\t\tif n in list(G.nodes()) and n != left_root:\n\t\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(left_root.encode('utf-8')).hexdigest())\n\t\tleft_network = nx.relabel_nodes(left_network, dup_dict)\n\t\tG = nx.compose(G, left_network)\n\t\tif root != left_root:\n\t\t\tG.add_edge(splitter, left_root, weight=0, label=\"None\")\n\n\t# Recursively build right side of network\n\tright_network, right_subproblems = greedy_build(right_split, priors, cutoff, considered.copy(), uniq + \"1\", targets=targets)\n\tright_nodes = [node for node in right_network.nodes() if right_network.in_degree(node) == 0]\n\tright_root = root_finder(right_split)\n\n\tdup_dict = {}\n\tfor n in 
right_network:\n\t\tif n in list(G.nodes()) and n != right_root:\n\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(right_root.encode('utf-8')).hexdigest())\n\tfor n in dup_dict:\n\t\trename_dict = {n: dup_dict[n]}\n\t\tif right_network.out_degree(n) != 0:\n\t\t\tright_network = nx.relabel_nodes(right_network, rename_dict)\n\t\telse:\n\t\t\trename_dict = {n: dup_dict[n]}\n\t\t\tG = nx.relabel_nodes(G, rename_dict)\n\n\tG = nx.compose(G, right_network)\n\t# if right_root not in right_split and right_root in targets:\n\t# \tright_root = right_root + \"_unique\"\n\t#for node in right_nodes:\n\tif root != right_root:\n\t\tif not priors:\n\t\t\tG.add_edge(splitter, right_root, weight=1, label = str(character) + \": 0 -> \" + str(state))\n\t\telse:\n\t\t\tG.add_edge(splitter, right_root, weight=-np.log(priors[int(character)][state]), label=str(character) + \": 0 -> \" + str(state))\n\n\n\treturn G, left_subproblems + right_subproblems", "def findRFBestDepth():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def test_weighted_trees_satisyfing_cutoff(self):\n sct = LogLikelihoodScoredTreeCollection(self.scored_trees)\n cts = sct.getWeightedTrees(cutoff=0.8)\n expected_trees = [Tree(t) for t in \"((a,b),(c,d));\", \"((a,b),(c,d));\",\n \"((a,b),c,d);\"]\n for i in range(len(cts)):\n cts[i][1].sameTopology(expected_trees[i])\n \n ct = cts.getConsensusTree()\n self.assertTrue(ct.sameTopology(Tree(\"((a,b),(c,d));\")))", "def prune_path(clf, X, y, max_n_leaves=10, n_iter=10,\n test_size=0.1, random_state=None, n_jobs=1):\n \n\n from sklearn.base import clone\n from sklearn.cross_validation import StratifiedShuffleSplit,ShuffleSplit\n from sklearn.metrics import roc_auc_score,mean_squared_error\n from multiprocessing.dummy import Pool as ThreadPool\n from itertools import repeat\n import pandas as pd\n #import copy\n \n #classification score\n def my_auc(estimator, X, y):\n y_score = estimator.predict_proba(X)[:,1] # You could also use the binary predict, but probabilities should give you a more realistic score.\n return roc_auc_score(y, y_score)\n \n #regression score\n def my_nmse(estimator, X, y):\n y_pre = estimator.predict(X) # You could also use the binary predict, but probabilities should give you a more realistic score.\n return -mean_squared_error(y, y_pre)\n \n\n if len(np.unique(y)) == 2: \n scoring_fuc = 
my_auc\n \n else:\n scoring_fuc = my_nmse\n \n def multip_run(fuction,task_zip,n_jobs = 1):\n\n #Multi-process Run\n\n pool = ThreadPool(processes=n_jobs)\n results = pool.starmap(fuction, task_zip)\n pool.close()\n pool.join()\n return results \n\n def OneFoldCut(clf,X_train, y_train,X_test,y_test,max_n_leaves):\n estimator = clone(clf)\n \n fitted = estimator.fit(X_train, y_train)\n \n if max_n_leaves < get_n_leaves(fitted):\n n_leaves = max_n_leaves\n \n else:\n n_leaves = get_n_leaves(fitted)\n \n print('###### Iters true start leaves is %d #######' % n_leaves)\n \n #cut_num = list(range(2,n_leaves, 1))\n cut_num = list(range(n_leaves-1,1,-1))\n #n = len(cut_num)\n loc_indexs = []\n loc_scores = []\n for i in cut_num:\n #clf1 = copy.deepcopy(fitted)\n #clf1 = clone(fitted)\n #clf1.prune(i)\n fitted.prune(i)\n onescore = scoring_fuc(fitted,X_test,y_test)\n #onescore = scoring_fuc(clf1,X_test,y_test)\n loc_scores.append(onescore)\n loc_indexs.append(i)\n \n S = pd.DataFrame(loc_scores,index=loc_indexs)\n\n return S\n\n\n #scores = list()\n if len(np.unique(y)) == 2: \n kf = StratifiedShuffleSplit(y,\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n else:\n kf = ShuffleSplit(len(y),\n n_iter = n_iter, \n test_size= test_size,\n random_state=random_state)\n \n X_trains = [X[tr] for tr,ts in kf]\n y_trains = [y[tr] for tr,ts in kf]\n \n X_tests = [X[ts] for tr,ts in kf]\n y_tests = [y[ts] for tr,ts in kf]\n \n task_zip = zip(repeat(clf),\n X_trains,\n y_trains,\n X_tests,\n y_tests,\n repeat(max_n_leaves))\n \n scores = multip_run(OneFoldCut,task_zip,n_jobs = n_jobs)\n \n df = pd.concat(scores,axis=1)\n df.columns = range(len(df.columns))\n\n return df #zip(*scores)", "def prune_trivial_subtrees(self):\n num_pruned = 0\n if not self.is_leaf:\n children_classes = set()\n num_trivial_children = 0\n for child_node in self.nodes:\n num_pruned += child_node.prune_trivial_subtrees()\n if child_node.is_leaf:\n num_trivial_children += 1\n children_classes.add(child_node.most_common_int_class)\n if num_trivial_children == len(self.nodes) and len(children_classes) == 1:\n self.is_leaf = True\n num_pruned += num_trivial_children\n self.nodes = []\n return num_pruned", "def _initialize_trees(self):", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def DecisionTreeAlgorithm(df, mltask, counter = 0, min_samples = 2, max_depth = 5, random_subspace = None):\n\n if counter == 0:\n global COLUMN_HEADERS, FEATURE_TYPE\n COLUMN_HEADERS = df.columns\n FEATURE_TYPE = hf.determine_type_of_feature(df)\n data = df.values\n else:\n data = df\n \n if (check_purity(data)) or (len(data) < min_samples) or (counter == max_depth):\n leaf = create_leaf(data, mltask)\n return leaf\n \n else:\n counter += 1\n \n potential_splits = get_potential_split(data, random_subspace)\n split_column,split_value = determine_best_split(data, potential_splits, mltask)\n 
data_below,data_above = split_data(data,split_column,split_value)\n \n if (len(data_below) == 0) or (len(data_above) == 0):\n leaf = create_leaf(data, mltask)\n return leaf\n \n feature_name = COLUMN_HEADERS[split_column]\n type_of_feature = FEATURE_TYPE[split_column]\n if type_of_feature == 'continuous':\n question = '{} <= {}'.format(feature_name,split_value)\n else:\n question = '{} = {}'.format(feature_name,split_value)\n sub_tree = {question:[]}\n \n yes_answer = DecisionTreeAlgorithm(data_below, mltask, counter, min_samples, max_depth, random_subspace)\n no_answer = DecisionTreeAlgorithm(data_above, mltask, counter, min_samples, max_depth, random_subspace)\n \n if yes_answer == no_answer :\n sub_tree = yes_answer\n else :\n sub_tree[question].append(yes_answer)\n sub_tree[question].append(no_answer)\n \n return sub_tree", "def test_animal_with_fitness_level_0_dies():\n animal = animals.Herbivores(age=200, weight=1)\n cell = topo.Jungle()\n cell.add_animal(animal)\n animals.Herbivores.parameters[\"omega\"] = 1\n cell.natural_death_all_animals_in_cell()\n animals.Herbivores.parameters[\"omega\"] = 0.4\n assert len(cell.herbivore_list) == 0", "def random_forest_002(outfile=\"sub_random_forest_002.csv\", n_jobs=4):\n mdl = models.RandomForest.RandomForestMoreFeatures(n_jobs=n_jobs, cv_sample=0.1)\n mdl.run('cv')", "def train(eps, ntrees, min_size, max_splits, nfeats_test, resample=True):\n # TODO your code here\n trees = []\n for _ in range(ntrees):\n # repeatedly add values from the list of expression profiles without removal to a set\n # (so there could be duplicate expression profiles in the set we are creating) until the size of the set\n # is equal to the size of the original list of profiles\n if resample:\n resampled_eps = []\n for _ in range(len(eps)):\n idx = random.randint(0, len(eps) - 1)\n resampled_eps.append(eps[idx])\n trees.append(\n ExpressionDecisionTree.train(resampled_eps, len(resampled_eps), min_size, max_splits, nfeats_test))\n else:\n trees.append(\n ExpressionDecisionTree.train(eps, len(eps), min_size, max_splits, nfeats_test))\n return ExpressionRandomForest(trees)", "def evolve(self, elitism='on', save='off', probability=0.05, rate=0.05):\n if self.state == 'dead':\n\n self.member_fitness = [self.members[i].fitness for i in range(self.size)]\n\n self.fittest_brain = self.members[self.member_fitness.index(max(self.member_fitness))]\n\n if save == 'on':\n self.fittest_brain.save_as('fittest_brain')\n\n self.total_population_fitness = sum(self.member_fitness)\n\n print('Total population fitness is %s' % (self.total_population_fitness))\n\n self.mating_pool = [[self.members[i]] * round(self.member_fitness[i] * 1000 / self.total_population_fitness) for i in range(self.size)]\n\n self.mating_pool = [brain for sublist in self.mating_pool for brain in sublist]\n\n self.children = []\n\n if elitism == 'on':\n\n self.children.append(self.fittest_brain)\n\n for i in range(self.size - 1):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n else:\n for i in range(self.size):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n\n self.members = self.children\n\n self.members[0].state = 'alive'\n\n self.state = 'alive'\n self.generation += 1\n\n else:\n print('Cannot evolve: some members are still 
alive')", "def sel_roulette(fitness, tournaments, replace=False):\r\n # Create list of indexes\r\n tmp_index, tmp_fitness = [], []\r\n for i, val in enumerate(fitness):\r\n tmp_index.append(i)\r\n tmp_fitness.append(val)\r\n # Normalize with regard to total fitness\r\n total_fitness = sum(tmp_fitness)\r\n # Draw individuals\r\n sel_individuals = []\r\n for tournament in range(tournaments):\r\n # Get random value between [0, total_fitness)\r\n i, value = 0, random.random() * total_fitness\r\n while True:\r\n value -= tmp_fitness[i]\r\n # Check if 'tmp_index[i]' is the winner\r\n if value < 0:\r\n sel_individuals.append(tmp_index[i])\r\n break\r\n i += 1\r\n if replace is False:\r\n del tmp_fitness[i]\r\n del tmp_index[i]\r\n # Adjust interval for random number\r\n total_fitness = sum(tmp_fitness)\r\n return sel_individuals", "def fill(self):\n # Fail fast if num_classes or num_features isn't set.\n _ = getattr(self, 'num_classes')\n _ = getattr(self, 'num_features')\n\n self.training_library_base_dir = getattr(\n self, 'training_library_base_dir', '')\n self.inference_library_base_dir = getattr(\n self, 'inference_library_base_dir', '')\n\n self.bagged_num_features = int(self.feature_bagging_fraction *\n self.num_features)\n\n self.bagged_features = None\n if self.feature_bagging_fraction < 1.0:\n self.bagged_features = [random.sample(\n range(self.num_features),\n self.bagged_num_features) for _ in range(self.num_trees)]\n\n self.regression = getattr(self, 'regression', False)\n\n # Num_outputs is the actual number of outputs (a single prediction for\n # classification, a N-dimenensional point for regression).\n self.num_outputs = self.num_classes if self.regression else 1\n\n # Add an extra column to classes for storing counts, which is needed for\n # regression and avoids having to recompute sums for classification.\n self.num_output_columns = self.num_classes + 1\n\n # Allow each tree to be unbalanced by up to a factor of 2.\n self.max_depth = (self.max_depth or\n int(2 * math.ceil(math.log(self.max_nodes, 2))))\n\n # The Random Forest literature recommends sqrt(# features) for\n # classification problems, and p/3 for regression problems.\n # TODO(thomaswc): Consider capping this for large number of features.\n self.num_splits_to_consider = (\n self.num_splits_to_consider or\n max(10, int(math.ceil(math.sqrt(self.num_features)))))\n\n # max_fertile_nodes doesn't effect performance, only training speed.\n # We therefore set it primarily based upon space considerations.\n # Each fertile node takes up num_splits_to_consider times as much\n # as space as a non-fertile node. 
We want the fertile nodes to in\n # total only take up as much space as the non-fertile nodes, so\n num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))\n # But always use at least 1000 accumulate slots.\n num_fertile = max(num_fertile, 1000)\n self.max_fertile_nodes = self.max_fertile_nodes or num_fertile\n # But it also never needs to be larger than the number of leaves,\n # which is max_nodes / 2.\n self.max_fertile_nodes = min(self.max_fertile_nodes,\n int(math.ceil(self.max_nodes / 2.0)))\n\n # We have num_splits_to_consider slots to fill, and we want to spend\n # approximately split_after_samples samples initializing them.\n num_split_initializiations_per_input = max(1, int(math.floor(\n self.num_splits_to_consider / self.split_after_samples)))\n self.split_initializations_per_input = getattr(\n self, 'split_initializations_per_input',\n num_split_initializiations_per_input)\n\n # If base_random_seed is 0, the current time will be used to seed the\n # random number generators for each tree. If non-zero, the i-th tree\n # will be seeded with base_random_seed + i.\n self.base_random_seed = getattr(self, 'base_random_seed', 0)\n\n return self", "def disperse(self, tree, dtype=\"normal\", n=2):\n\n # computes \"dispersion spread\" for current generation\n spread = self._dispersion_spread(n)\n\n # creates mother tree's offsprings known as seedlings\n for _ in range(tree.seeds):\n\n # creates new seedling by mutation of mother tree's DNA\n seedling = self._mutate(tree, spread, dtype)\n\n # checks boundaries of optimal problem\n self._check(seedling.vector)\n\n # evaluates fitness\n fitness = self.evaluate(seedling.vector)\n\n # adds new seedling to forest\n self.seedlings.append((fitness, seedling))", "def random_param_tune(self):\n random_grid = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 4],\n 'min_samples_split': [2, 5, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=250, cv=3, verbose=2, n_jobs=-1)\n rf_random.fit(self.X_train, self.y_train)\n self.results.write(str(rf_random.best_params_) + \"\\n\")", "def tournament_selection(self, population: List[IndividualType]) -> List[IndividualType]:\n survivors: List[IndividualType] = []\n for _ in range(self.configuration.n_survivors):\n # Choose participants\n rooster: List[IndividualType] = random.sample(population, self.configuration.rooster_size)\n # Select fittest of participants as survivor\n fittest_individual_of_rooster = self.get_best_individual(rooster)\n population.remove(fittest_individual_of_rooster)\n survivors.append(fittest_individual_of_rooster)\n return survivors", "def find_significant_children(tree, node):\n if node not in tree.children:\n return None\n smax = 1\n c1, c2 = tree.children[node]\n sch = c1, c2\n while tree.population[c1] > 1 or tree.population[c2] > 1:\n if tree.population[c1] >= tree.population[c2]:\n small, big = c2, c1\n else:\n small, big = c1, c2\n if tree.population[small] >= smax:\n smax = tree.population[small]\n sch = small, big\n c1, c2 = tree.children[big]\n return sch", "def growForest(config, load_exp_file=True):\n\n silent = config.get('silent', False)\n experiment_Path = r\"C:\\Users\\user\\Desktop\\Prediction_model\\experiment\\flood.exp\"\n\n if load_exp_file:\n #loadExperimentFile(config, 
filename=config.exp)\n loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n\n forests = []\n results = []\n\n\n # do multiple runs if needed. note that we start at config.run, not zero\n for run in range(config.num_runs):\n training_graphs, testing_graphs = splitDict(config.graphs, int(len(config.graphs) * .8), random=True)\n\n \"\"\"\n # perform under-sampling if needed\n if hasattr(config, 'underlabel'):\n under_graphs = {}\n skip_count = 0\n for k in training_graphs.keys():\n if training_graphs[k].class_label == config.underlabel and random.random() <= config.underval:\n skip_count += 1\n else:\n under_graphs[k] = training_graphs[k]\n print('Undersampled ' + str(skip_count) + ' graphs')\n training_graphs = under_graphs\n \"\"\"\n # print out some useful info on the class distribution\n counts = defaultdict(int)\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('training:', len(training_graphs), counts)\n\n counts = defaultdict(int)\n for graph in testing_graphs.values():\n counts[graph.class_label] += 1\n print('testing:', len(testing_graphs), counts)\n\n for graph in training_graphs.values():\n counts[graph.class_label] += 1\n print('total:', len(config.graphs), counts)\n\n print('\\nrun:', run)\n config.run = run\n\n srrf = SRRForest(config)\n #srrf.growForest(training_graphs)\n srrf.growForest(config.graphs)\n forests.append(srrf)\n #srrf.training_graph_ids = list(training_graphs.keys())\n #training_labeling = srrf.labelGraphs(training_graphs,config.time_list)\n #outOfBagLabels=srrf.getOutOfBagLabels()\n #print(\"outOfBagLabels\")\n #print(outOfBagLabels)\n #c=srrf.compute_oob_score(training_graphs, outOfBagLabels)\n #print(\"concordance index:\")\n #print(c)\n config.saveTrees(srrf)\n\n #results.append(c)\n\n\n\n\n \"\"\"\n\n df = pd.DataFrame(columns=['lon', 'lat', 'survival_probability', 'time'])\n\n\n srrf.testing_graph_ids = testing_graphs.keys()\n testing_labeling = srrf.labelGraphs(testing_graphs,config.time_list)\n\n\n\n\n\n\n\n for i,h in testing_labeling.items():\n\n lat = i.graph.attributes_by_type.get(('cell', 'lat'))[0].value\n lon = i.graph.attributes_by_type.get(('cell', 'lon'))[0].value\n for t, label in h.items():\n df = df.append(\n {'lon': lon, 'lat': lat, 'survival_probability': label[1], 'time': t},\n ignore_index=True)\n\n sort_by_time = df.sort_values('time')\n print(sort_by_time.head())\n import plotly.express as px\n fig = px.scatter_mapbox(sort_by_time, lat=\"lat\", lon=\"lon\", hover_data=[\"survival_probability\"],\n color=\"survival_probability\", animation_frame=\"time\", animation_group=\"time\",\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10, height=500)\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig.show()\n \"\"\"\n\n\n\n #config.saveTrees((srrf,)) ###config.saveTree is giving us an eror type error: unable to pickle dict keys.\n\n #print('numruns: %s' % (config.num_runs))\n #print(results)\n\n\n #return results", "def test_random_forest_n_estimators_parameter(params, X_train, X_test, y_train, y_test):", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the 
dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def __init__(self, n_samples=1000, n_features=4):\n self.n_samples = 1000\n self.n_features = 4\n self.forest = []", "def generational_replacement(random, population, parents, offspring, args):\r\n num_elites = args.setdefault('num_elites', 0)\r\n population.sort(reverse=True)\r\n offspring.extend(population[:num_elites])\r\n offspring.sort(reverse=True)\r\n survivors = offspring[:len(population)]\r\n return survivors", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n 
progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def repair(self, population_size=POPULATION_SIZE, iterations=100):\n self.validate()\n\n population = self.initial_population(population_size)\n\n last_key = ast.dump(self.target_tree)\n\n for iteration in range(iterations):\n population = self.evolve(population)\n\n best_tree = population[0]\n fitness = self.fitness(best_tree)\n\n if self.log:\n print(f\"Evolving population: \"\n f\"iteration{iteration:4}/{iterations} \"\n f\"fitness = {fitness:.5} \\r\", end=\"\")\n\n if self.log >= 2:\n best_key = ast.dump(best_tree)\n if best_key != last_key:\n print()\n print()\n self.log_tree(f\"New best code (fitness = {fitness}):\",\n best_tree)\n last_key = best_key\n\n if fitness >= 1.0:\n break\n\n if self.log:\n print()\n\n if self.log and self.log < 2:\n self.log_tree(f\"Best code (fitness = {fitness}):\", best_tree)\n\n best_tree = self.reduce(best_tree)\n fitness = self.fitness(best_tree)\n\n self.log_tree(f\"Reduced code (fitness = {fitness}):\", best_tree)\n\n return best_tree, fitness", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = 
max(differences)\n return best", "def decision_tree_clf():\n\tclf_entropy = DecisionTreeClassifier(\n\t\tcriterion = \"entropy\", random_state = seed,\n\t\tmax_depth = 3, min_samples_leaf = 5\n\t\t)\n\treturn clf_entropy", "def choose_mother(self, index):\n\n candidates = []\n while not candidates:\n tgt_age = int(sample_table(self.fertility_age_probs[index], self.rng)[0])\n tgt_prev_min = 0; tgt_prev_max = 100\n if self.params['use_parity']:\n # old\n # tgt_prev_min = int(sample_table(\n # self.fertility_parity_probs[(tgt_age-15)/5], self.rng)[0])\n tgt_prev_min = int(sample_table(\n self.fertility_parity_probs[floor((tgt_age-15)/5)], self.rng)[0])\n # effectively transform 5 into 5+\n tgt_prev_max = tgt_prev_min if tgt_prev_min < 5 else 20\n tgt_set = self.P.individuals_by_age(tgt_age, tgt_age)\n candidates = [x\n for x in tgt_set \\\n if x.sex == 1 \\\n and x.can_birth() \\\n and not x.with_parents \\\n and tgt_prev_min <= len(x.children) <= tgt_prev_max\n ]\n # TODO ejw: consider updating parity prob usage to `len(x.children) - 1`\n # the `tgt_prev_min` and `tgt_prev_max` seems to be based on the probability that a mother of age `y` should\n # have `x` children at time period t. Say `x=1` children is chosen, then the mother should have one child.\n # Why should mothers with len(x.children)=1 then be considered as candidates? Shouldn't it be\n # `len(x.children) - 1 = 1`? Meaning, a mother without a child is chosen to have a child?\n # Unless, the parity table is restructured to mean the probability of a women with 0 children having a child\n # This actually makes more sense since the mother's age is chosen based on fertility rates, implying that a\n # mother in this age should have a child. If x=0 means no, then the probability of the mother actually\n # having a child is way too low: P(women of age y have a child) x (1 - P(x=0)).\n # Consider the actual probability tables for ages up-to 19:\n # 0.856 0\n # 0.125 1\n # 0.017 2\n # 0.001 3\n # 0.001 4\n # 0 5\n # The above either means the probability of mother of zero children having a child is quite high.\n # Or a mother not having a child is quite high.\n # From the above, x=5 is zero, but the above logic can assign a mother aged 18 with x=4 to a new child,\n # this making x=5 when she is 19, which should not be possible with the above.\n return self.rng.choice(candidates)", "def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get 
support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree", "def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def evolve(pop_perf, breed_method):\n # Sort on the scores.\n pop = [x[1] for x in sorted(pop_perf, key=lambda x: x[0], reverse=True)]\n\n # keep the best 25%\n retain_length = 2 #int(np.ceil(len(pop)*.25))\n\n # The parents are every network we want to keep.\n parents = pop[:retain_length]\n\n # Randomly mutate the networks we're keeping, and add these\n # This preserves the already good networks, so we don't lose out.\n mutated = []\n for index, individual in enumerate(parents):\n mutated.append(mutate(parents[index]))\n parents.extend(mutated)\n\n # For those we aren't keeping, randomly add 10% of population to increase variance. Mutate them individually, then add. \n # Mutation because we already know they are bad, should try something else. Something like that.\n num_poor = 2#int(math.ceil(len(pop)*.1))\n poor_keeping = random.sample(pop[retain_length:], num_poor)\n for poor_sch in poor_keeping:\n parents.append(mutate(poor_sch))\n\n # Now find out how many spots we have left to fill. 
(how many children to make, about 40% of full pop)\n parents_length = len(parents)\n desired_length = len(pop) - parents_length\n children = []\n\n\n # Add children, which are bred from two remaining networks.\n while len(children) < desired_length:\n\n # Get a random mom and dad.\n male = random.randint(0, parents_length-1)\n female = random.randint(0, parents_length-1)\n\n # Assuming they aren't the same network...\n if male != female:\n male = parents[male]\n female = parents[female]\n\n # pick breeding method:\n if random.random() > .5:\n way = 'mean'\n else:\n way = 'random'\n \n # Breed them.\n babies = breed_method(male, female, way)\n\n # children.append(babies[desired_length:])\n # Add the children one at a time.\n for baby in babies:\n # Don't grow larger than desired length.\n if len(children) < desired_length:\n children.append(baby)\n parents.extend(children)\n return parents" ]
[ "0.62971836", "0.62759644", "0.6227639", "0.6085251", "0.6084228", "0.6072655", "0.6057905", "0.6017821", "0.6007997", "0.5916229", "0.58216655", "0.5750822", "0.5743819", "0.5695105", "0.5636572", "0.56322336", "0.5611677", "0.5590373", "0.5590373", "0.55826205", "0.55778784", "0.5545633", "0.55383354", "0.55381334", "0.5524706", "0.5510218", "0.5496254", "0.54962456", "0.5489816", "0.54662806", "0.5458722", "0.54585195", "0.5456201", "0.54558545", "0.5454718", "0.5453844", "0.5444749", "0.540993", "0.5386625", "0.53817505", "0.53761446", "0.536535", "0.53612715", "0.534956", "0.5347688", "0.5347029", "0.53430164", "0.532307", "0.5314918", "0.53024495", "0.52948046", "0.5292295", "0.5287642", "0.5284389", "0.5281324", "0.5280943", "0.527272", "0.5251105", "0.5241454", "0.52340794", "0.5232105", "0.5231754", "0.52280146", "0.52263105", "0.5225172", "0.5217325", "0.52168506", "0.52164674", "0.52070373", "0.5206858", "0.51899725", "0.5183592", "0.5173348", "0.5171207", "0.5167832", "0.5164764", "0.51628166", "0.51598585", "0.5155195", "0.5151942", "0.51494384", "0.51485187", "0.5146286", "0.51434803", "0.5139816", "0.513857", "0.51357067", "0.51328474", "0.5128735", "0.5126463", "0.51190907", "0.5112856", "0.51125807", "0.51115966", "0.5103759", "0.51035196", "0.5101775", "0.51014227", "0.50975657", "0.5095696" ]
0.7099714
0
Truncates forest to maximum number of trees.
def truncate(self): self.population = self.population[:self.max_number_trees]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_depth_forest(self):\n return max(x.tree_.max_depth for x in self.result.estimators_)", "def reset_max_depth(self) -> None:\n # The max depth is now calculated on the fly, so this is a no-op.\n pass", "def truncate_features(self):\n num_variable = len(self.Train_data['X'][0])\n for i in xrange(len(self.Train_data['X'])):\n num_variable = min([num_variable, len(self.Train_data['X'][i])])\n # truncate train, validation and test\n for i in xrange(len(self.Train_data['X'])):\n self.Train_data['X'][i] = self.Train_data['X'][i][0:num_variable]\n for i in xrange(len(self.Validation_data['X'])):\n self.Validation_data['X'][i] = self.Validation_data['X'][i][0:num_variable]\n for i in xrange(len(self.Test_data['X'])):\n self.Test_data['X'][i] = self.Test_data['X'][i][0:num_variable]\n return num_variable", "def set_tree_limit(self, n):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n _check_call(_LIB.TreeliteSetTreeLimit(self.handle, ctypes.c_size_t(n)))", "def deleteMaxTree(root):\n try:\n if (isRed(root['left'])):\n root = rotateRight(root)\n\n if (root['right'] is None):\n return None\n\n if ((not isRed(root['right'])) and\n ((not isRed(root['right']['left'])))):\n\n root = moveRedRight(root)\n\n root['right'] = deleteMaxTree(root['right'])\n root = balance(root)\n return root\n\n except Exception as exp:\n error.reraise(exp, 'RBT:deleteMinTree')", "def maximumDistance(self):\n from ete2 import Tree\n t = Tree(name='LUCA_root')\n empty_forest = {'sp':t,'gns':t,'fam':t,'ord':t,'cls':t,'phy':t,'kng':t}\n return self.distanceToTree(empty_forest,update_inner_attributes=False)", "def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self", "def _subtree_below_maximum_leaves(self, root, threshold):\r\n\r\n nodes = root.get_terminals()\r\n return len(nodes) <= threshold", "def test_trunc_depth(self):\n from supvisors.statistics import StatisticsInstance\n instance = StatisticsInstance(12, 5)\n # test that the truc_depth method does nothing when less than 5 elements in list\n test_list = [1, 2, 3, 4]\n instance.trunc_depth(test_list)\n self.assertListEqual([1, 2, 3, 4], test_list)\n # test that the truc_depth method keeps only the last 5 elements in list\n test_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n instance.trunc_depth(test_list)\n self.assertListEqual([6, 7, 8, 9, 10], test_list)", "def _prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. 
\"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def _trim_tree(state):\n for n in list(state.tree.leaf_node_gen):\n if n.type_str == TYPE_NODE_TAG:\n n.parent.child_list.remove(n)\n return _trim_tree(state)", "def resize_to_maximum(self):\n if self.initialized:\n max_size = self._compute_maximum_size()\n self.set_max_size(max_size)\n self.resize(max_size)", "def maxsize(self, maxsize):\n self.shape = (int(maxsize), ) + self.shape[1:]\n self.clear()", "def update_max_fringe_size(self, fringe_len):\n if self.max_fringe_size < fringe_len:\n self.max_fringe_size = fringe_len", "def take_max(self):\n return self.delete_first()", "def final_forest(element):\n if isinstance(element, TamariIntervalPoset):\n return element.initial_forest()\n elif element in DyckWords():\n binary_tree = element.to_binary_tree_tamari()\n elif element in BinaryTrees() or element in LabelledBinaryTrees():\n binary_tree = element\n else:\n raise ValueError(\"Do not know how to construct the initial forest of {}\".format(element))\n\n def get_relations(bt, start=1):\n r\"\"\"\n Recursive method to get the binary tree final forest relations\n with only one recursive reading of the tree.\n\n The vertices are being labelled with integers starting with\n ``start``.\n\n OUTPUT:\n\n - the indexes of the nodes on the left border of the tree\n (these become the roots of the forest)\n - the relations of the final forest (as a list of tuples)\n - the next available index for a node (size of tree +\n ``start``)\n \"\"\"\n if not bt:\n return [], [], start # leaf\n roots, relations, index = get_relations(bt[0], start=start)\n rroots, rrelations, rindex = get_relations(bt[1], start=index + 1)\n roots.append(index)\n relations.extend(rrelations)\n relations.extend([(j, index) for j in rroots])\n return roots, relations, rindex\n\n roots, relations, index = get_relations(binary_tree)\n return TamariIntervalPoset(index - 1, relations)", "def get_n_leaves(clf):\n leaves = clf.tree_.children_left == -1\n leaves = np.arange(0,clf.tree_.node_count)[leaves]\n return len(leaves)", "def del_max(self):\r\n maxVal = self.find_max()\r\n if maxVal is not None:\r\n self.items[1] = self.items[self.size]\r\n self.items[self.size] = None\r\n self.size -= 1\r\n self.perc_down(1)", "def sift_down_recursion(self, index):\n if self.size() == 0:\n return\n\n left = self.left_child(index)\n right = self.right_child(index)\n # if the element is leaf\n if left >= self.size():\n return\n\n max_child_index = left\n if right < self.size():\n if self.heap[right] > self.heap[left]:\n max_child_index = right\n\n # if already max heap, return\n if 
self.heap[index] >= self.heap[max_child_index]:\n return\n\n self.heap[index], self.heap[max_child_index] = self.heap[max_child_index], self.heap[index]\n\n index = max_child_index\n self.sift_down_recursion(index)", "def unsetMaxLevel(self):\n return _libsbml.QualitativeSpecies_unsetMaxLevel(self)", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def test_remove_top_but_not_root(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"tea\")\n assert delete_tree.size == tree_size - 1", "def merge_forest(self, forest):\n\t\tif len(forest) == 1:\n\t\t\treturn forest[0]\n\n\t\tfinal_tree = forest.pop()\n\t\ttrees_to_process = forest\n\t\twhile not len(trees_to_process) == 0:\n\t\t\ttree_in_question = trees_to_process.pop(0)\n\t\t\tif final_tree.contains(tree_in_question.value):\n\t\t\t\tnodes_to_process = [final_tree]\n\t\t\t\twhile not len(nodes_to_process) == 0:\n\t\t\t\t\tnode_in_question = nodes_to_process.pop()\n\t\t\t\t\tif node_in_question.value == tree_in_question.value:\n\t\t\t\t\t\tnode_in_question.children += [tree_in_question]\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tif not len(node_in_question.children) == 0:\n\t\t\t\t\t\t\tnodes_to_process += node_in_question.children\n\t\t\telse:\n\t\t\t\ttrees_to_process += [final_tree]\n\t\t\t\tfinal_tree = tree_in_question\n\t\treturn final_tree", "def largest_killing_spree(self, largest_killing_spree):\n\n self._largest_killing_spree = largest_killing_spree", "def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] != -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def truncate_best_frames(self):\n\n # Compute the stack size from the given percentage. 
Take at least one frame.\n max_frames = max(\n int(ceil(self.frames.number * self.configuration.quality_area_frame_percent / 100.)), 1)\n\n # This is a precaution measure: Do not take more frames than elements in the shortest\n # ranking list.\n self.stack_size = min(max_frames, min(\n [min([len(self.quality_areas[j][i]['best_frame_indices']) for i in arange(self.x_dim)])\n for j in arange(self.y_dim)]))\n\n # For all quality areas: Truncate the \"best frame indices\" list to the uniform selected\n # stack size.\n for index_y, quality_area_row in enumerate(self.quality_areas):\n for index_x, quality_area in enumerate(quality_area_row):\n quality_area['best_frame_indices'] = quality_area['best_frame_indices'][\n :self.stack_size]\n\n # Register the coordinates of this quality area with all frames where it is used.\n for frame_index in quality_area['best_frame_indices']:\n\n # If for this frame the list is empty, initialize it as a list of lists.\n # Otherwise append the 2-list with quality area coordinates.\n if self.frames.used_quality_areas[frame_index]:\n self.frames.used_quality_areas[frame_index].append([index_y, index_x])\n else:\n self.frames.used_quality_areas[frame_index] = [[index_y, index_x]]", "def max_depth(self) -> int:\n return 0", "def remove_big(s: Stack) -> None:\n #\n temp = Stack()\n while not s.is_empty():\n val = s.pop()\n # Only keep values less than or equal to five.\n if val <= 5:\n temp.push(val)\n\n # Restore the original stack.\n while not temp.is_empty():\n s.push(temp.pop())", "def size(self):\n return self.variables.end_of_tree - 1", "def prune(self, n_leaves):\n true_node_count = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n leaves = np.where(self.children_left == _tree.TREE_LEAF)[0]\n to_remove_count = true_node_count - 2*n_leaves + 1\n\n nodes_to_remove = pruning_order(self, max_to_prune = to_remove_count/2)\n\n # self._copy is gone, but this does the same thing\n out_tree = _tree.Tree(*self.__reduce__()[1])\n out_tree.__setstate__(self.__getstate__().copy())\n\n for node in nodes_to_remove:\n #TODO: Add a Tree method to remove a branch of a tree\n out_tree.children_left[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_left[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_right[out_tree.children_right[node]] = _tree.TREE_UNDEFINED\n out_tree.children_left[node] = _tree.TREE_LEAF\n out_tree.children_right[node] = _tree.TREE_LEAF\n\n # FIXME: currently should not change node_count, after deletion\n # this is not number of nodes in the tree\n #out_tree.node_count -= 2*len(nodes_to_remove)\n\n return out_tree", "def grow_trees(self, regrow=False):\n if self.forest == [] or regrow:\n mtry = int(math.floor(math.sqrt(len(self.variables))))\n data, trees, var, pred_index = self.data, self.trees, self.variables, self.prediction_index\n attr_fn, dist_classes, order, imp = self.attr_fn, self.dist_classes, len(self.data), self.importance_fn\n self.forest = random_forest.RandomForest(data, trees, mtry, var, pred_index, attr_fn, dist_classes, order, imp)\n print self.trees, ' have been grown using a set of ', len(self.variables), ' variables.'\n else:\n print \"Already a forest in place, add regrow=True to override.\"", "def DEFAULT_MAX_DEPTH(self): # real signature unknown; restored from __doc__\n pass", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with 
[] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def truncate_weights(self, max_weight):\n S = sum(self.weights)\n to_trunc = (self.weights > S*max_weight)\n n_to_trunc = sum(to_trunc)\n if n_to_trunc == 0:\n S = sum(self.weights)\n if not S > 0.:\n raise ValueError(f'Sum of weights is {S} but should be positive')\n self.weights /= S\n return S\n \n print(f\"Truncating {n_to_trunc:d} weights\")\n to_not_trunc = torch.logical_not(to_trunc)\n sum_untrunc = sum(self.weights[to_not_trunc])\n if sum_untrunc == 0:\n # Impossible to truncate further!\n S = sum(self.weights)\n if not S > 0.:\n raise ValueError(f'Sum of weights is {S} but should be positive')\n self.weights /= S\n return S\n trunc_to = max_weight * sum_untrunc / (1. - max_weight * n_to_trunc)\n max_untrunc = torch.max(self.weights[to_not_trunc])\n ## trunc_to calculation is done so that\n ## after w[to_trunc]=trunc_to\n ## w[to_trunc] / sum(w) all equal max_weight\n ## **But** we don't want to truncate below next smallest weight\n if trunc_to >= max_untrunc:\n self.weights[to_trunc] = trunc_to\n S = sum(self.weights)\n if not S > 0.:\n raise ValueError(f'Sum of weights is {S} but should be positive')\n self.weights /= S\n return S\n else:\n self.weights[to_trunc] = max_untrunc\n return self.truncate_weights(max_weight)", "def n_trees(self):\n return len(self.data_kd)", "def clean():\n new_tree = None", "def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")", "def decrement_depth(self):\r\n self.depth = self.depth - 1", "def test_remove_childless_on_delete_tree(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"teabaggers\")\n assert delete_tree.size == tree_size - 1", "def set_limit(self, limit):\n self.limit = limit\n self._prune()", "def test_rand_100_depth_remains_less_than_8():\n from bbst import Bst\n from random import shuffle\n max_depth = 0\n for x in range(10):\n rando = [x for x in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n tree_depth = tree.depth()\n if tree_depth > max_depth:\n max_depth = tree_depth\n assert max_depth == 8", "def setrecursionlimit(n): # real signature unknown; restored from __doc__\n pass", "def trim_to_upper_length_limit(self) -> None:\n self.trim_utils.lang_model = self.config['language_model']\n\n dataframe_splits = np.array_split(self.data, self.n_cores)\n pool = Pool(self.n_cores)\n self.data = pd.concat(pool.map(self.trim_text_for_dataframe, dataframe_splits))\n pool.close()\n pool.join()", "def prune( self ):\n if self.children is None:\n return\n \n # recursively prune from bottom up\n for space in self.children:\n space.prune()\n\n # if all child nodes are empty remove them all\n for space in self.children:\n if not space.is_empty():\n return\n\n self.children = None", "def cut(self, max_lenght):\n self.V_estimates = self.V_estimates[:max_lenght]\n super().cut(max_lenght)", "def prune_levels(handles, max_level=15):\n for i, handle in enumerate(handles):\n handles[i] = handle.loc[dict(bottom_top=slice(0, max_level))]\n return handles", "def getbestnumberoftrees(features: ndarray, target: ndarray, limit:int) -> tuple:\n\n # Defining the initial accuracy 
value to compare with different number of trees in training\n accuracy = 0\n accuracyList = []\n\n for n in range(1, limit+1, 1):\n # Training\n trained_model = InternalRandomForest.modeltrain(features, target, n)\n\n # Calculating the percentual accuracy of the training\n accuracy_t = accuracy_score(target, trained_model.predict(features), normalize=True)\n\n # Build accuracy array for this set of number of trees\n accuracyList.append(accuracy_t)\n\n # Verifying if the current training is better than the last one\n if accuracy_t > accuracy:\n bestNumberTrees = n\n accuracy = accuracy_t\n\n # Obtain best trained model\n best_model = InternalRandomForest.modeltrain(features, target, bestNumberTrees)\n\n return bestNumberTrees, accuracyList, best_model", "def truncate_text_by_num_tokens(text, max_tokens, tok_separator=\" \"):\n _toks = text.split(tok_separator)\n return tok_separator.join(_toks[:min(max_tokens, len(_toks))])", "def test_remove_middle_child_on_delete_tree(delete_tree):\n tree_size = delete_tree.size\n delete_tree.remove(\"teabag\")\n assert delete_tree.size == tree_size - 1", "def resize_tree_column(self):\n self.treeView.resizeColumnToContents(0)", "def final_forest(self):\n return TamariIntervalPoset(self.size(), self.decreasing_cover_relations())", "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def _truncate_by_data_gaps(self, df, target_metric):\n\n import numpy as np\n\n max_data_gap = abs(self.min_ts_length / 3.0)\n\n gap_len = 0\n last_avl_idx = None\n for row in df[::-1].iterrows():\n if np.isnan(row[1][target_metric]) or row[1][target_metric] is None:\n gap_len = gap_len + 1\n else:\n gap_len = 0\n last_avl_idx = row[0]\n\n if gap_len >= max_data_gap and last_avl_idx:\n truncated_df = df[last_avl_idx:]\n return truncated_df\n\n return df", "def leave(self):\n self.num_leaves += 1", "def pop_nodes(self, count):\n log.debug('Removing {} Request nodes.'.format(count))\n self.beginRemoveRows(QModelIndex(), 0, count-1)\n if len(self.root_item.children) > 0:\n self.root_item.children = self.root_item.children[count:]\n self.endRemoveRows()", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def delete_max(self):\n max_val = self.peek_max()\n self.remove(max_val)\n return max_val", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ 
ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def _decrease_parent_count(self):\n if self.parent is not None:\n self.parent.size -= 1\n self.parent._decrease_parent_count()", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def get_tree_length(x):\n length = list(self.__plan_graph.objects(subject=x, predicate=AGORA.length)).pop()\n return length", "def _find_max(self, root):\n while root.right:\n root = root.right\n return root", "def clear_tree(self):\n self.treeview.delete(*self.treeview.get_children())", "def truncate(self, count=0, reverse=True):\n if reverse:\n return self.value[:(len(self.value) - count)]\n else:\n return self.value[count:]", "def __cleanup(self):\n while self.levels > 1 and self.head.next == None:\n temp = self.head\n self.head = self.head.below\n del temp\n self.levels -=1", "def minimal_subtree(tree):\n tree_copy = tree.copy()\n\n for n in tree_copy.traverse():\n if len(n.children) == 1:\n n.delete()\n\n new_root = tree_copy\n while len(new_root.children) == 1:\n new_root = new_root.children[0]\n\n new_tree = new_root.detach()\n return new_tree", "def reset(self) -> None:\r\n self.tree.delete(*self.tree.get_children())", "def deleteMinTree(root):\n try:\n if (root['left'] is None):\n return None\n if ((not isRed(root['left'])) and ((not isRed(root['left']['left'])))):\n root = moveRedLeft(root)\n root['left'] = deleteMinTree(root['left'])\n root = balance(root)\n return root\n\n except Exception as exp:\n error.reraise(exp, 'RBT:deleteMinTree')", "def prune_trivial_subtrees(self):\n num_pruned = 0\n if not self.is_leaf:\n children_classes = set()\n num_trivial_children = 0\n for child_node in self.nodes:\n num_pruned += child_node.prune_trivial_subtrees()\n if child_node.is_leaf:\n num_trivial_children += 1\n children_classes.add(child_node.most_common_int_class)\n if num_trivial_children == len(self.nodes) and len(children_classes) == 1:\n self.is_leaf = True\n num_pruned += num_trivial_children\n self.nodes = []\n return num_pruned", "def truncation(self) -> int:\n return self._truncate", "def filter_rows_by_non_empty_until(df, max_=MAX_NUM_ROWS):\n print('Starting shape: %s' % str(df.shape))\n threshold = 1\n while df.shape[0] > max_:\n df = filter_rows_by_non_empty(df, threshold=threshold)\n print('THRESHOLDING: to shape: %s' % str(df.shape))\n threshold += 1\n print('Ending shape: %s' % str(df.shape))\n return df", "def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()", "def fit_tree_stump_forest(X_train: np.ndarray, y_train: np.ndarray, n_estimators: int) -> RandomForestClassifier:\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf = clf.fit(X_train, y_train)\n return clf", "def build_most_significant_tree(ktree, outputdir):\n stree = Tree()\n root = max(ktree.parents.keys())\n for cpt, popval in ktree.population.items():\n if popval == 1:\n stree.population[cpt] = 1\n _iter_build_most_significant_tree(ktree, stree, root)\n with open(os.path.join(outputdir, \"most_significant_tree.p\"), \"wb\") as f:\n pickle.dump(stree, f)\n return stree", "def tree(branchLen):\n if branchLen > 5:\n t.backward(branchLen)\n t.right(20)\n tree(branchLen-16,t)\n t.left(40)\n 
tree(branchLen-16,t)\n t.right(20)\n t.forward(branchLen)", "def option_max_children(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionMaxChildren/')))", "def max_tree_id(self) -> int:\n\n return max(self.tree_ids) if len(self.tree_ids)>0 else 0", "def remove_short_roots(self):\n\n # Proportion of the branch point's radius that the total length has to be to avoid removal.\n # Lower multipliers remove less incorrect roots, but also don't incorrectly remove real roots\n radius_multiplier = 0\n\n edge_roots = list()\n\n for root in self.root_dict.values():\n if not root.branches_at_endpoint:\n edge_roots.append(root)\n\n while edge_roots:\n\n next_root_list = list()\n\n for root in edge_roots:\n\n if root and len(root.pixel_list) < radius_multiplier * root.pixel_list[0].radius and root.parent_root:\n\n self.remove_pixels(root.pixel_list)\n\n parent = root.remove_edge_root()\n if parent and not parent.branches_at_endpoint:\n next_root_list.append(parent)\n\n self.root_dict.pop(root.key, None)\n\n edge_roots = next_root_list", "def deep_max(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[-1]\r\n return node.keys[-1] if node.keys else None", "def cut_frame_tail(df):\n # TODO\n return df", "def max(self):\n return self.root.rightmost", "def max_stack(self):\n if self.maxx == []:\n return None\n return self.maxx[-1]", "def recoverTree(self, root: TreeNode) -> None:\n if not root:\n return\n if root.left and root.left.val > root.val:\n root.left.val, root.val = root.val, root.left.val\n return\n if root.right and root.right.val < root.val:\n root.right.val, root.val = root.val, root.right.val\n return\n self.recoverTree(root.left)\n self.recoverTree(root.right)", "def tree_size(self):\n if self._tree_size is not None:\n return self._tree_size\n if self.is_root:\n self.arbor._setup_tree(self)\n # pass back to the arbor to avoid calculating again\n self.arbor._store_node_info(self, '_tree_size')\n else:\n self._tree_size = len(list(self[\"tree\"]))\n return self._tree_size", "def truncate(text=\"\", max_len=50):\n return text if len(text) < max_len else text[:max_len]", "def generate_tree(self, max_depth = None):\n\n if max_depth is None:\n max_depth = self.tree.max_depth\n else:\n max_depth -= 1\n if max_depth == 0:\n return\n self.generate_children()\n if self.tree.remove:\n os.unlink(self.source_filename)\n for child in self.children:\n if child.count > self.tree.max_count:\n child.generate_tree(max_depth)", "def setmaxsize(self, maxsize):\n self.maxsize = maxsize", "def maxlen(self):\n \n return reduce(max, list(map(len, self.tags)))", "def truncate(self, size=None):\n raise NotImplementedError(\"truncate() not supported\")", "def _setStacksize(self):\n assert self.stackLimit != None\n assert isinstance(self.stackLimit, int)\n import resource\n if self.stackLimit == 0:\n resource.setrlimit(resource.RLIMIT_STACK,\n (resource.RLIM_INFINITY, resource.RLIM_INFINITY))\n else:\n resource.setrlimit(resource.RLIMIT_STACK,\n (self.stackLimit, self.stackLimit))", "def get_uneaten_leaves(branch_length, caterpillars):\n\n if type(branch_length) != int:\n return None\n\n if branch_length <= 0:\n return 0\n\n if not caterpillars:\n return branch_length\n\n # Break up caterpillars\n prime_cats = get_prime_caterpillars(caterpillars)\n\n branch_break = prime_cats[-1] # Requires list is sorted\n multi = branch_length // branch_break\n remainder = branch_length % branch_break\n # Get \"whole-branch\" leaves\n leaf_c = 
get_remaining_leaves_from_branch(branch_break, prime_cats) * multi\n # Get partial-branch leaves, if any\n if remainder:\n leaf_c += get_remaining_leaves_from_branch(remainder, prime_cats)\n\n return leaf_c", "def remove_subtree(self, root: State):\n self.remove(root)\n for child in root.children:\n self.remove_subtree(child)" ]
[ "0.60658157", "0.5753063", "0.57320607", "0.5690253", "0.56196177", "0.557107", "0.5399001", "0.53606296", "0.5356624", "0.5334575", "0.53127694", "0.529486", "0.5244779", "0.52287835", "0.52241135", "0.5215633", "0.5206999", "0.51637185", "0.5151103", "0.5127064", "0.5113451", "0.5082578", "0.50764436", "0.50717384", "0.5068558", "0.5068275", "0.50567216", "0.50459456", "0.50371647", "0.50360316", "0.5019158", "0.50076234", "0.5000044", "0.49996275", "0.49813306", "0.49473855", "0.49461642", "0.4935134", "0.4935134", "0.49286658", "0.49163643", "0.49057853", "0.49044925", "0.48908156", "0.48792323", "0.48625684", "0.4859423", "0.4857279", "0.48438564", "0.48251334", "0.4814217", "0.4811256", "0.47979861", "0.47891486", "0.47773656", "0.477297", "0.47628847", "0.47578558", "0.4755491", "0.47539657", "0.47440672", "0.47423896", "0.4740629", "0.47385824", "0.4735043", "0.47304547", "0.4722394", "0.4720499", "0.47199735", "0.47048566", "0.4696932", "0.46964434", "0.46941498", "0.46888736", "0.46872306", "0.46815416", "0.4679398", "0.46733662", "0.4666847", "0.46612582", "0.46581164", "0.46486607", "0.46377507", "0.46375418", "0.46356133", "0.46344748", "0.4624474", "0.46234658", "0.46175036", "0.46134973", "0.46113467", "0.46076792", "0.46040764", "0.4603877", "0.4601214", "0.46009344", "0.4595395", "0.45928285", "0.45909292", "0.45883623" ]
0.7533811
0
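The record above closes with its score and rank. Its visible negatives (grow_forest, the resampling variant of train, fit_tree_stump_forest) share one pattern: build a forest by bootstrap sampling, i.e. draw as many records as the dataset holds, with replacement, and fit one tree per draw. The snippet below is a minimal, self-contained sketch of that resampling loop only; the toy data and the stand-in train_tree callable (a plain mean instead of a real tree learner such as the buildTreePandas or RandomForestClassifier helpers named above) are illustrative assumptions, not part of the dataset.

import random

def bootstrap_sample(records):
    # Draw len(records) rows with replacement, as grow_forest does above.
    n = len(records)
    return [records[random.randint(0, n - 1)] for _ in range(n)]

def grow_forest(n_trees, records, train_tree):
    # Train one model per bootstrap sample and collect the models into a forest.
    return [train_tree(bootstrap_sample(records)) for _ in range(n_trees)]

# Toy run: the "tree" here is just the mean of the sampled values.
data = [1.0, 2.0, 3.0, 4.0, 5.0]
forest = grow_forest(3, data, train_tree=lambda rows: sum(rows) / len(rows))
print(forest)  # three bootstrap means scattered around 3.0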
Sorts based on a given item.
def SortOnItem(list_, item_loc): templist = [elmt[item_loc] for elmt in list_] index = np.argsort(templist) return [list_[i] for i in index]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uasort(item, func):\n return sort(item, func)", "def execute(self, item):\n items = item[\"items\"]\n reverse = not item.get(\"descending\", True)\n items.sort(key=operator.itemgetter(\"distance\"), reverse=reverse)\n\n return item", "def SortChildren(self, item):\r\n\r\n children = item.GetChildren()\r\n \r\n if len(children) > 1:\r\n self._dirty = True\r\n children.sort(self.OnCompareItems)", "def SortChildren(self, item):\r\n\r\n if not self._attr_set:\r\n setattr(self._main_win, \"OnCompareItems\", self.OnCompareItems)\r\n self._attr_set = True\r\n \r\n self._main_win.SortChildren(item)", "def sortby(self):\n ...", "def sorted_index(self, item):\n return _(_sorted_index(self._, item))", "def sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def sorted_items_from_pages(cls, pages, item_key, sort_key):\n items = []\n for page in pages:\n items.extend(page[item_key])\n result = sorted(items, key=operator.itemgetter(sort_key))\n return result", "def _sort(self, row):\n if not self._head:\n self._head = self._create_head(row)\n if self._args.head:\n return row\n\n if 'key' not in self._state:\n self._state['key'] = self._replace_fields(self._args.key)\n\n r = list(map(self._convert, row))\n self._sorting_insert(self._result, r, key=lambda r: eval(self._state['key']))", "def SortItems(self,column=None,reverse='CURRENT'):\r\n #--Parse column and reverse arguments.\r\n data = self.data\r\n if self.sortDirty:\r\n self.sortDirty = False\r\n (column, reverse) = (None,'CURRENT')\r\n curColumn = data.defaultParam('colSort',data.tankColumns[0])\r\n column = column or curColumn\r\n curReverse = data.defaultParam('colReverse',{}).get(column,False)\r\n if reverse == 'INVERT' and column == curColumn:\r\n reverse = not curReverse\r\n elif reverse in ('INVERT','CURRENT'):\r\n reverse = curReverse\r\n data.updateParam('colReverse')[column] = reverse\r\n data.setParam('colSort',column)\r\n #--Sort\r\n items = self.data.getSorted(column,reverse)\r\n sortDict = dict((self.item_itemId[y],x) for x,y in enumerate(items))\r\n self.gList.SortItems(lambda x,y: cmp(sortDict[x],sortDict[y]))\r\n #--Done\r\n self.mouseTexts.clear()", "def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def sort_by_user_arg(user_argument = 'sort by ex. artist'):\n pass", "def sort(self, Items):\n # Pre:\n # 'Items' must be a mutable list of items of type 'T'.\n # Post:\n # Returns a list that contains all elements of 'Items' in sorted order. 'Items' may or may not become sorted, but will contain the same elements as before.\n self.quicksort(Items, 0, Items.count - 1)\n return Items", "def sorting(list_object): # Takes in a ListItem object and returns the\r\n # priority value - from w3schools.com\r\n return list_object.priority", "def folder_sort(request, item_container):\n return do_sort(request, item_container, 'folder', _(u'Ordner, Seiten etc. 
umordnen'))", "def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))", "def smart_sort(item):\n try:\n return int(''.join(os.path.basename(item).split('.')[0:-1]))\n except (TypeError, ValueError, AttributeError):\n return item", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort_data(data):\n data.sort(key=itemgetter(3,2))\n return data", "def gnome_sort(items):\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i-1]:\n items[i], items[i-1] = items[i-1], items[i]\n i -= 1\n else:\n i += 1\n return items", "def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case)\r\n # TODO: Split items list into approximately equal halves\r\n # TODO: Sort each half by recursively calling merge sort\r\n # TODO: Merge sorted halves into one list in sorted order\r", "def insertion_sort(items, key):\n # if order == \"reverse\":\n # compare = operator.lt\n # elif order == \"normal\":\n # compare = operator.gt\n global COMPARE\n\n # Repeat until all items are in sorted order\n for index in range(len(items)):\n iterator = index\n\n # Take first unsorted item\n while COMPARE(key(items[iterator-1]), key(items[index])) and iterator > 0:\n iterator -= 1\n # Insert it in sorted order in front of items\n sorteditem = items.pop(index)\n items.insert(iterator, sorteditem)\n\n return items", "def sort_by(self, param):\n sorted(self.books_all, key=lambda k: k[param])\n return self.books_all", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def selection_sort(items):\n # TODO: Repeat until all items are in sorted order\n # TODO: Find minimum item in unsorted items\n # TODO: Swap it with first unsorted item\n for x in range(len(items)):\n smallest_index = x\n if x!=len(items)-1:\n for y in range(x+1, len(items)):\n if items[y] < items[smallest_index]:\n smallest_index = y\n temp = items[x]\n items[x] = items[smallest_index]\n items[smallest_index] = temp", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def selection_sort(items):\n # Repeat until all items are in sorted order\n # Find minimum item in unsorted items\n # Swap it with first unsorted item\n current = 0\n minimum = 0\n first = 0\n while not is_sorted(items):\n if items[current] < items[minimum]:\n minimum = current\n\n elif current == len(items) - 1:\n items[minimum], items[first] = items[first], items[minimum]\n first += 1\n current = first\n minimum = first\n \n else:\n current += 1", "def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n\n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n\n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items", "def quick_sort(items):\n if len(items) &gt; 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val &lt; items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items", "def bubble_sort(items):\n # TODO: Repeat until all items are in sorted order\n # TODO: Swap adjacent items that are out of order\n for x in 
range(len(items)-1):\n if items[x]>items[x+1]:\n temp = items[x]\n items[x] = items[x+1]\n items[x+1] = temp", "def sort(self, *args, **kwargs):\n self._sequence.sort(*args, **kwargs)", "def sortinplace(items):\n # filter out indexes for both types, use comprehensions for readability\n # and compactness since input is small\n iidx = [i for i, item in enumerate(items) if item.strip('-').isdigit()]\n oidx = [i for i, item in enumerate(items) if not item.strip('-').isdigit()]\n\n # Sort indexes based on the value it points to\n siidx = sorted(iidx, key=lambda idx: int(items[idx]))\n soidx = sorted(oidx, key=lambda idx: items[idx])\n\n # Map the positions pre and post sort, merge the result and sort again\n # based on the pre sort keys.\n msidx = sorted(zip(iidx, siidx) + zip(oidx, soidx), key=getter(0))\n\n return [items[i[1]] for i in msidx]", "def insertion_sort(items):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items\n for x in range(1, len(items)):\n swap = items[x]\n y = x-1\n while items[y] > swap and y>=0:\n items[y+1] = items[y]\n y-=1\n items[y+1] = swap", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def reorder_list(items, arg=''):\n\n if arg:\n\n for i in items:\n if i == arg:\n items.remove(i)\n items.insert(0, arg)\n\n return items", "def sort(self, *pargs, **kwargs):\n if self._can_sort(*pargs, **kwargs):\n list.sort(self, *pargs, **kwargs)", "def role_reorder_valid_roles_sort_key(item):\n return item[1]", "def sort(self):\n # sort the contents of the container alphabetically\n # this is done automatically whenever an item is added/removed from the Container\n self.items.sort(key=lambda item: item.name)", "def custom_sort(arr):\n pass", "def _sort_nodes(cls: Type, lst: List[Dict[str, Any]],\n by: str = 'item_title'):\n assert type(lst) == list\n lst.sort(key=lambda n: n[by])\n for n in lst:\n if 'nodes' in n:\n cls._sort_nodes(n['nodes'], by)", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def bubble_sort(items):\n # Repeat until all items are in sorted order\n # Swap adjacent items that are out of order\n current = 0\n right = 1\n while not is_sorted(items):\n if current == len(items) - 1:\n current = 0\n right = 1\n\n elif items[current] > items[right]:\n items[current], items[right] = items[right], items[current]\n \n else:\n current += 1\n right += 1", "def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def resort(self):\n self.items.sort(key=lambda node: node.path_weight, reverse=True)", "def insertion_sort(items):\n for i in range(1, len(items)):\n j = i\n while j > 0 and items[j] < items[j-1]:\n items[j], items[j-1] = items[j-1], items[j]\n j -= 1", "def do_sort(request, item_container, app_name, my_title, app_types=[]):\n\n class dms_itemForm ( forms.Form ) :\n sections = forms.CharField(required=False,\n widget=forms.Textarea( attrs={'rows':5, 'cols':40,\n 'style':'width:50%;'}) )\n\n my_item = item_container.item\n objs = []\n has_user_folder = False\n user_perms = UserEditPerms(request.user.username,request.path)\n\n # wurden Zwischentitel 
geloescht, ergaenzt, umgeordnet?\n if request.POST.has_key('sections_form'):\n s = request.POST['sections']\n if s != item_container.container.sections:\n item_container.container.sections = s\n item_container.container.save()\n change_values = request.POST.has_key('drag_item_form')\n if change_values:\n items, sections, d_sections = get_folder_content(item_container, False, app_types)\n order_by_ids, new_sections_str = do_resort(request.POST['var_order_by_0'])\n n = 0\n c = []\n for i in items:\n if app_types==[] or i.item.app.name in app_types:\n has_changed = False\n if order_by_ids[i.item.id][0] != i.order_by:\n i.order_by = order_by_ids[i.item.id][0]\n has_changed = True\n sec = order_by_ids[i.item.id][1]\n if sec != i.section:\n i.section = sec\n has_changed = True\n if has_changed:\n i.save()\n # --- wurde die Reihenfolge der Zwischentitel geaendert?\n n = new_sections_str\n if item_container.container.sections != n:\n item_container.container.sections = n\n item_container.container.save()\n items, sections, d_sections = get_folder_content(item_container, False, app_types)\n js_head, drag_list, input_str, n_drag_titles = get_drag_list(sections, d_sections)\n max_items = len(items)\n # --- Zwischentitel\n data_init = {'sections' : decode_html(item_container.container.sections,) }\n f = dms_itemForm(data_init)\n tabs = [('tab_sections' , ['sections',]), ]\n sec_content = get_tabbed_form(tabs, help_form, 'lecture' , f, False)\n\n vars, user_perms = get_base_vars(request, item_container, 'frame-main-manage')\n v = { 'objs' : objs,\n 'js_head' : js_head,\n 'drag_list' : drag_list,\n 'input_str' : input_str,\n 'max_items' : n_drag_titles,\n 'id' : my_item.id,\n 'title' : my_title,\n 'sub_title' : my_item.title,\n 'name' : my_item.name,\n 'action' : get_folderish_actions(request, user_perms, item_container, app_name,\n item_container.item.has_comments,\n {'browseable_mode': False,\n 'navigation_mode': False}),\n 'sec_content': sec_content\n }\n vars.update(v)\n vars['image_url'] = ''\n vars['text'] = ''\n vars['text_more'] = ''\n return render_to_response ( 'app/base_sort.html', vars )", "def sort(self, *args: Any, **kwargs: Any) -> BaseList:\n super().sort(*args, **kwargs)\n return self", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def multikeysort(items, columns):\n comparers = [((itemgetter(col[1:].strip()), 1) if col.startswith('-') else\n (itemgetter(col.strip()), -1)) for col in columns]\n\n def comparer(left, right):\n for _fn, mult in comparers:\n result = ((_fn(left) > _fn(right)) - (_fn(left) < _fn(right)))\n if result:\n return mult * result\n return None\n\n return sorted(items, key=cmp_to_key(comparer))", "def sort_1(l):\n pass", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def insertion_sort(items):\n # Repeat until all items are in sorted order\n # Take first unsorted item\n # Insert it in sorted order in front of items\n sorted_index = 1\n while not is_sorted(items):\n num = items.pop(sorted_index)\n \n back_index = sorted_index - 1\n for back_num in items[sorted_index-1::-1]:\n if num > back_num:\n items.insert(back_index + 1, num)\n break\n\n back_index -= 1\n else:\n items.insert(0, num)\n \n sorted_index += 1", "def sort(self, new_arrangement):\n\t\tself.order_dict = self._build_dict(new_arrangement)\n\t\tself.swap_idx = self.arrangement.index(0)\n\t\tself._move_swap_el(len(new_arrangement)-1)\n\t\tl = self._quicksort(0, 
len(new_arrangement)-2)\n\t\tself._move_swap_el(self.order_dict[0])", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items)-1-i):\n if items[j] > items[j+1]:\n items[j], items[j+1] = items[j+1], items[j] # Swap!", "def sort(self):\n # Base Case\n # If the robot has reached the end of the list and his light is off (no swaps have occurred),\n if self.can_move_right() == False and self.light_is_on() == False:\n return\n\n # Grab the first card\n self.swap_item()\n\n # While the robot is still able to move right,\n while self.can_move_right():\n\n # Move right\n self.move_right()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is greater than what he is holding (-1), swap items\n if self.compare_item() == -1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once the robot can no longer move right, he is at the end of the list and holding the largest value\n # Swap items\n self.swap_item()\n\n # Now the robot needs to traverse back to index 0, grabbing the smallest value as he goes\n # Follow the same logic as when he moved right with the largest value\n\n # If he hits a empty slot in the list, everything in front of it has been sorted\n # He doesn't need to sort anymore, he is holding the smallest value left to be sorted. \n # Put it in the blank spot and turn to move back in the other direction\n\n while self.compare_item() is not None:\n\n # Move left\n self.move_left()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is less than what he is holding (1), swap items\n if self.compare_item() == 1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once self.compare_item() is None, that means he is in front of a blank space\n # - everything to the left of the blank space has already been sorted\n # Deposit what he is holding\n self.swap_item()\n\n # Reset the light to the off position\n self.set_light_off()\n\n # Move one spot over to the right\n self.move_right()\n\n # Re-run the process all over again\n self.sort()", "def bubble_sort(items):\n out = items.copy() # in place protection on items\n for i in range(len(out)):\n for j in range(len(out)-1-i):\n if out[j] > out[j+1]:\n out[j], out[j+1] = out[j+1], out[j] # Swap!\n\n return out", "def selection_sort(items):\n # Repeat until all items are in sorted order\n # Find minimum item in unsorted items\n # Swap it with first unsorted item\n # TODO: Running time: ??? Why and under what conditions?\n # TODO: Memory usage: ??? 
Why and under what conditions?\"\"\"\n # pdb.set_trace()\n while not is_sorted(items):\n for i in range(len(items) - 1):\n # setting the minimum to start with\n min = i\n # Start looping from the current index i\n for j in range(i + 1, len(items)):\n # if j is less than our current minimum\n if items[j] < items[min]:\n # set j to our minimum\n min = j\n # Once loop is done set i to be our minimum\n items[i], items[min] = items[min], items[i]\n return items", "def _sort_by_name(bam_fn):", "def apply_sorting(tasks, *conditions):\n return tasks.sort(conditions)", "def gallery_sort(request, item_container):\n\n return do_sort(request, item_container, 'pool', _(u'Bilder umordnen'))", "def selection_sort(items):\n\n n = len(items)\n for j in range(n):\n # Find the index of the smallest item in the range(j,n)\n i_min = j\n for i in range(j + 1, n):\n if (items[i] < items[i_min]):\n i_min = i\n\n # Swap the items at j and i_min if needed.\n if i_min != j:\n items[j], items[i_min] = items[i_min], items[j]\n\n return items", "def custom_sort(pseudo):\n # Unpack\n pred = pseudo[\"pred_list\"]\n lab = pseudo[\"lab_list\"]\n name = pseudo[\"name_list\"]\n \n # Sort\n sorted_list = list(zip(pred, lab, name))\n sorted_list.sort(key=lambda x: x[0], reverse=True)\n \n pred_sorted = [row[0] for row in sorted_list]\n lab_sorted = [row[1] for row in sorted_list]\n name_sorted = [row[2] for row in sorted_list]\n \n # Re-pack\n pseudo = {\n \"pred_list\": pred_sorted,\n \"lab_list\": lab_sorted,\n \"name_list\": name_sorted\n }\n \n return pseudo", "def sorting(my_list):\n for indx in range(1,len(my_list)):\n i=indx\n while i>0:\n if my_list[i]<my_list[i-1]:\n temp=my_list[i-1]\n my_list[i-1]=my_list[i]\n my_list[i]=temp\n i=i-1\n return my_list", "def sort(self, args):\n if not args:\n self.err_print('One argument required')\n return\n\n _key = args[0]\n cur = self.ui.leftwin.highlighted().data\n try:\n ind = song.tags.index(_key)\n cur.change_sort(ind)\n self.ui.rightwin.disp()\n except:\n self.err_print('\"{}\" is not a valid key to sort by'.format(_key))", "def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort(self, _cmp=None, key=None):\n if len(self) == 0:\n return\n\n if _cmp is not None:\n from functools import cmp_to_key\n from sage.misc.superseded import deprecation\n deprecation(21145, \"Please use 'key' to sort.\")\n self.__x.sort(key=cmp_to_key(_cmp))\n return\n\n if key is not None:\n self.__x.sort(key=key)\n return\n\n a = self.__x[0][0]\n sort_key = None\n if hasattr(a, 'dimension'):\n try:\n a.dimension()\n\n def sort_key(f):\n return (f[0].dimension(), f[1], f[0])\n except (AttributeError, NotImplementedError, TypeError):\n pass\n elif hasattr(a, 'degree'):\n try:\n a.degree()\n\n def sort_key(f):\n return (f[0].degree(), f[1], f[0])\n except (AttributeError, NotImplementedError, TypeError):\n pass\n\n if sort_key is None:\n\n def sort_key(f):\n return f[0]\n\n self.__x.sort(key=sort_key)", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n 
else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal", "def _sort_column(self, column, reverse):\n if tk.DISABLED in self.state():\n return\n # get list of (value, item) tuple where value is the value in column for the item\n l = [(self.set(child, column), child) for child in self.get_children('')]\n # sort list using the column type\n l.sort(reverse=reverse, key=lambda x: self._column_types[column](x[0]))\n # reorder items\n for index, (val, child) in enumerate(l):\n self.move(child, \"\", index)\n # reverse sorting direction for the next time\n self.heading(column, command=lambda: self._sort_column(column, not reverse))", "def _natsort_key_case_insensitive(item):\r\n # added the lower() call to allow for case-insensitive sorting\r\n item = str(item).lower()\r\n\r\n try:\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item)\r\n except TypeError:\r\n # if item is a tuple or list (i.e., indexable, but not a string)\r\n # work with the first element\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item[0])\r\n for ii in range(len(chunks)):\r\n if chunks[ii] and chunks[ii][0] in '0123456789':\r\n if '.' in chunks[ii]:\r\n numtype = float\r\n else:\r\n numtype = int\r\n # wrap in tuple with '0' to explicitly specify numbers come first\r\n chunks[ii] = (0, numtype(chunks[ii]))\r\n else:\r\n chunks[ii] = (1, chunks[ii])\r\n return (chunks, item)", "def insertion_sort(items):\n # Repeat until all items are in sorted order\n # Take first unsorted item\n # Insert it in sorted order in front of items\n # TODO: Running time: ??? Why and under what conditions?\n # TODO: Memory usage: ??? 
Why and under what conditions?\"\"\"\n # NOTE: Need to be able to sort multiples\n while not is_sorted(items):\n # \n for i in range(len(items)-1):\n # Loop through the list in reversal until you get to 0\n for j in range(i, - 1, -1):\n # If left index is bigger than right index\n if items[j] > items[j + 1]:\n # Then swap\n items[j], items[j + 1] = items[j + 1], items[j]\n else:\n # Break once the item is not bigger and insert\n break\n return items", "def split_sort_merge(items):\n # TODO: Split items list into approximately equal halves\n pivot = len(items) // 2\n # TODO: Sort each half using any other sorting algorithm\n # sort first half in-place (insertion sort)\n left = insertion_sort(items[:pivot])\n\n right = insertion_sort(items[pivot:])\n # TODO: Merge sorted halves into one list in sorted order\n # merge the two half list (merge function but this does this in-place)\n sorted_list = merge(left, right)\n # change the input items\n items[:] = sorted_list\n return items", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def qsort2(items):\n if len(items) == 0:\n return []\n\n if len(items) == 1:\n return [ items[0] ]\n\n pivot = items[0]\n return qsort2([x for x in items if x < pivot])\\\n + [pivot]\\\n + qsort2([x for x in items if x > pivot])", "def sort(self, field=None, asc=True, castFunction=None):\n if field is None or field == '':\n # sort the keys\n self._sequence.sort()\n return\n\n def cast(value):\n if not castFunction:\n return value\n else:\n return castFunction(value)\n\n def innerCmp(a,b):\n order = 1\n if asc == False:\n order = -1\n return cmp(cast(self.dictionary[a][field]), cast(self.dictionary[b][field])) * order\n\n self._sequence.sort(innerCmp)", "def merge_sort(items):\n # TODO: Check if list is so small it's already sorted (base case)\n # TODO: Split items list into approximately equal halves\n # TODO: Sort each half by recursively calling merge sort\n # TODO: Merge sorted halves into one list in sorted order\n if len(items) > 1:\n pivot = len(items) // 2\n right = merge_sort(items[pivot:])\n left = merge_sort(items[:pivot])\n sorted_list = merge(left, right)\n else:\n sorted_list = items\n # change the input items \n items[:] = sorted_list\n return items", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def OnCompareItems(self, item1, item2):\r\n\r\n return cmp(self.GetItemText(item1), self.GetItemText(item2))", "def sort_list(self,list_):\r\n list_.sort()", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def item_comparer(self, value):\n self.item_comparer_value = value", "def sort(self, quant=None):\n if quant is None: # sort bei weight\n self.__sortlist = [key for key, values in sorted(self.__quantile.items(), key=lambda items: sum((10^quantille * count for quantille, count in enumerate(items[1].values()))))]\n elif isinstance(quant, int):\n self.__sortlist = [key for key, values in sorted(self.__quantile.items(), key=lambda items: items[1][quant])]", "def bubble_sort(items):\n for 
num in range(len(items)-1,0,-1):\n for j in range(num):\n if items[j]>items[j+1]:\n temp = items[j]\n items[j] = items[j+1]\n items[j+1] = temp\n return items", "def getSorted(self,column,reverse):\n data = self.data\n items = data.keys()\n if column == 'Package':\n items.sort(reverse=reverse)\n elif column == 'Files':\n items.sort(key=lambda x: len(data[x].fileSizeCrcs),reverse=reverse)\n else:\n items.sort()\n attr = column.lower()\n if column in ('Package','Group'):\n getter = lambda x: object.__getattribute__(data[x],attr).lower()\n items.sort(key=getter,reverse=reverse)\n else:\n getter = lambda x: object.__getattribute__(data[x],attr)\n items.sort(key=getter,reverse=reverse)\n #--Special sorters\n if settings['bash.installers.sortStructure']:\n items.sort(key=lambda x: data[x].type)\n if settings['bash.installers.sortActive']:\n items.sort(key=lambda x: not data[x].isActive)\n if settings['bash.installers.sortProjects']:\n items.sort(key=lambda x: not isinstance(data[x],InstallerProject))\n return items", "def by_order(item):\n if \"Order\" in item:\n return item[\"Order\"]\n return 0", "def bubble_sort(items):\n for i in range(len(items)):\n for j in range(len(items) - 1 - i):\n if items[j] > items[j + 1]:\n items[j], items[j + 1] = items[j + 1], items[j]\n return items", "def bubble_sort(items):\n # Repeat until all items are in sorted order\n # Swap adjacent items that are out of order\n # loop through list\n # TODO: Running time: ??? Why and under what conditions?\n # TODO: Memory usage: ??? Why and under what conditions?\"\"\"\n while not is_sorted(items):\n for i in range(len(items) - 1):\n # Loop backwards avoiding the already sorted numbers\n for j in range(len(items) - 1 - i):\n # if left item is bigger than the right\n if items[j] > items[j + 1]:\n # Swap left and right \n items[j], items[j + 1] = items[j + 1], items[j]\n return items", "def sortkey(item):\n chrom, pos, ref, alt = item[0]\n if chrom.startswith('chr'):\n chrom = chrom[3:]\n if chrom.isdigit():\n chrom = int(chrom)\n return (chrom, pos, len(ref), len(alt))", "def sort_movies(self, keyword):\n self.movies.sort(key=attrgetter(keyword, \"title\"))", "def sort(self, key_func):\n pass", "def sort(self, *args, **kwargs) -> \"Actions\":\n self.actions.sort(*args, **kwargs)\n return self", "def sort_by_another(to_sort, basis):\n return [x for (y, x) in sorted(zip(basis, to_sort), key=lambda pair: pair[0])]", "def sort():\n return -1", "def sort_list(list, key):\r\n list.sort(lambda x,y: cmp(key(x), key(y))) # Python < 2.4 hack\r\n return list" ]
[ "0.73161393", "0.72464734", "0.6715392", "0.66951114", "0.63434577", "0.6228377", "0.61038965", "0.60990417", "0.60690224", "0.60291386", "0.60132074", "0.59880716", "0.5957402", "0.5946752", "0.5913124", "0.58741784", "0.5858698", "0.5855381", "0.5847765", "0.5800869", "0.5799536", "0.5782873", "0.57783204", "0.57320005", "0.5728032", "0.57265913", "0.5726365", "0.5718991", "0.57058305", "0.56956506", "0.56765777", "0.56637913", "0.56634176", "0.5661191", "0.56608975", "0.56608975", "0.563509", "0.563369", "0.56251293", "0.56246775", "0.56106985", "0.56072086", "0.5602028", "0.5602028", "0.5577207", "0.5575369", "0.5564898", "0.5564898", "0.55616707", "0.55354255", "0.55328697", "0.5517958", "0.55132854", "0.55090636", "0.55064344", "0.55042344", "0.5500927", "0.5497763", "0.549584", "0.54888177", "0.5488763", "0.5487125", "0.54757154", "0.5460459", "0.5453117", "0.54475754", "0.54323304", "0.5431377", "0.5426801", "0.5424203", "0.54097986", "0.5401764", "0.53990674", "0.53987867", "0.53713024", "0.5363387", "0.53627104", "0.5357387", "0.5356547", "0.534132", "0.5332923", "0.5332877", "0.53161615", "0.53123206", "0.5311995", "0.53080326", "0.5307233", "0.53066343", "0.5301391", "0.5299147", "0.5295088", "0.5291096", "0.52842295", "0.5283588", "0.5281942", "0.52691525", "0.5265645", "0.5262473", "0.52491623", "0.524866" ]
0.6484688
4
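The pair just above ("Sorts based on a given item." retrieved against SortOnItem) is a small numpy argsort idiom: rank the rows by the value at one position, then rebuild the list in that order. A minimal usage sketch of the same pattern follows; the lowercase helper name and the sample rows are illustrative assumptions, not part of the dataset.

import numpy as np

def sort_on_item(list_, item_loc):
    # Mirror of the retrieved document: argsort the chosen column,
    # then reorder the original rows by those indices.
    templist = [elmt[item_loc] for elmt in list_]
    index = np.argsort(templist)
    return [list_[i] for i in index]

rows = [("b", 3.0), ("a", 1.5), ("c", 2.0)]
print(sort_on_item(rows, 1))  # by the float column  -> [('a', 1.5), ('c', 2.0), ('b', 3.0)]
print(sort_on_item(rows, 0))  # by the string column -> [('a', 1.5), ('b', 3.0), ('c', 2.0)]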
3. REPRODUCTION PHASE. The trees will produce seeds based on their relative fitness which will then be spread over the problem space. Each seed, in turn, will grow into a new tree depending on external factors. A linear increase in the number of seeds produced by the trees of the forest is considered from max_seeds for the tree with the lowest value to min_seeds for the one with the highest value (i.e. minimization problem).
def reproduce(self): def compute_seeds(fitness): """ Computes the number of seeds given a fitness value. """ seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \ (self.max_seeds-self.min_seeds) + self.min_seeds return round(seeds) # evaluates max and min fitness for current year max_fitness = max(tree[0] for tree in self.population) min_fitness = min(tree[0] for tree in self.population) # computes the number of seeds produced per tree for tree in self.population: tree[1].seeds = int(compute_seeds(tree[0]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def select(self):\n\n def truncate(self):\n \"\"\" Truncates forest to maximum number of trees. \"\"\"\n\n self.population = self.population[:self.max_number_trees]\n\n def SortOnItem(list_, item_loc):\n \"\"\" Sorts based on a given item. \"\"\"\n\n templist = [elmt[item_loc] for elmt in list_]\n index = np.argsort(templist)\n return [list_[i] for i in index]\n\n # adds current seedlings to forest\n for tree in self.seedlings:\n\n # if tree does not competes with another existing one, adds it\n if tree not in self.population:\n self.population.append(tree)\n\n # sorts the trees of the forest in ascending values - minimization\n self.population = SortOnItem(self.population, item_loc=0)\n\n # removes unfit trees from forest\n truncate(self)", "def generate(\n seeds=10,\n param_num_nodes=7,\n mode='train',\n param_dim=10,\n param_sel=100,\n param_mu=10,\n param_br=0.05,\n param_activity_wt=None,\n A=None,\n sp_to_id=None,\n min_coord=None,\n max_coord=None,\n org_pts=None,\n ):\n global dim, sel, mu, br, activity_wt, tree_lc, tree_rc, num_nodes\n\n dim=param_dim\n sel=param_sel\n mu=param_mu\n br=param_br\n activity_wt=param_activity_wt\n num_nodes = param_num_nodes\n\n sp_root = 0\n tree = None\n\n if mode == 'train':\n tree, tree_lc, tree_rc = generate_tree(sp_root, num_nodes)\n if param_activity_wt is None:\n # weights for the linear activity function\n num_wts = int(((dim * (dim + 1))/2) + 1)\n activity_wt = np.random.normal(0, 1, num_wts)\n\n if org_pts is None:\n org_pts = []\n # simulate data points\n # format: exampleID, species, values\n # region, species, coord1, coord2, ...., activity_value\n\n for i in tqdm(range(int(seeds))):\n pt_id = i\n\n # pick a random point of d-dimension\n rand_pt = np.random.uniform(min_coord, max_coord, dim)\n curr_pt = np.append([pt_id, sp_root], rand_pt)\n curr_activity = get_activity(modify_pt(rand_pt), activity_wt)\n # print('curr_pt:', curr_pt, 'curr_activity:', curr_activity); exit(0)\n org_pts.append(np.append(curr_pt, curr_activity))\n\n generated_points = []\n full_org_pts = []\n\n if mode == 'train':\n pool = Pool(16)\n sample_bag = pool.map(generate_bag, org_pts)\n 
for item in sample_bag:\n for val in item:\n val = list(val)\n full_org_pts.append(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n else:\n for val in org_pts:\n val = list(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n\n return generated_points, activity_wt, org_pts, full_org_pts, tree", "def test_rand_100_depth_remains_less_than_8():\n from bbst import Bst\n from random import shuffle\n max_depth = 0\n for x in range(10):\n rando = [x for x in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n tree_depth = tree.depth()\n if tree_depth > max_depth:\n max_depth = tree_depth\n assert max_depth == 8", "def run(self):\n population_p = self.create_population()\n population_p = self.sort_population(population_p)\n best_x = population_p[0]\n for k in range(self.iteration):\n population_r = []\n # random.shuffle(population_p)\n for i in range(0, self.population_length, 2):\n mother = 0\n father = 1\n children = [self.random_chromosome(), self.random_chromosome()]\n while (mother == father) or (children[0] in population_p) or (children[1] in\n population_p):\n mother = random.randint(0, self.population_length - 1)\n father = random.randint(0, self.population_length - 1)\n children = self.cross(population_p[mother], population_p[father])\n children[0] = self.mutate(children[0])\n children[1] = self.mutate(children[1])\n\n population_r.append(children[0])\n population_r.append(children[1])\n\n population_p = self.new_population(population_p, population_r)\n if self.fitness(population_p[0]) < self.fitness(best_x):\n best_x = population_p[0]\n\n # print(population_p)\n return best_x", "def compute_seeds(fitness):\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)", "def build_random_function(min_depth, max_depth):\n\n # your code goes here", "def simulate_graph(seed, cluster_sizes, del_factor, ins_factor):\n rand.seed(seed)\n cluster_boundaries = np.cumsum(cluster_sizes)\n print(\"#seed:\", seed)\n print(\"#deletion factor:\", del_factor)\n print(\"#insertion factor:\", ins_factor)\n optimal_costs = np.array([0])\n for c in range(0, len(cluster_sizes)-1):\n n_c = cluster_sizes[c+1]\n offset_c = cluster_boundaries[c]\n edges_c = generate_edges(n_c, offset_c)\n disturb_cluster(n_c, offset_c, edges_c, del_factor, optimal_costs)\n additional_edges(cluster_boundaries, ins_factor, optimal_costs)\n print(\"#optimal costs:\", optimal_costs)", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def test_random_forest_max_depth_parameter(params, X_train, X_test, y_train, y_test):", "def greedy_build(nodes, priors=None, cutoff=200, 
considered=set(), uniq='', targets=[]):\n\n\t# Tracks frequency of states for each character in nodes\n\tcharacter_mutation_mapping = defaultdict(int)\n\n\t# G models the network that is returned recursively\n\tG = nx.DiGraph()\n\n\troot = root_finder(nodes)\n\n\t# Base case check for recursion, returns a graph with one node corresponding to the root of the remaining nodes\n\tif len(nodes) <= cutoff or len(nodes) == 1:\n\t\troot = root_finder(nodes)\n\t\tG.add_node(root)\n\t\treturn G, [[root, nodes]]\n\n\t# Accounting for frequency of mutated states per character, in order to choose the best split\n\tfor node in nodes:\n\t\tnode_list = node.split(\"_\")[0].split('|')\n\t\tfor i in range(0, len(node_list)):\n\t\t\tchar = node_list[i]\n\t\t\tif char != '0' and char != '-':\n\t\t\t\tcharacter_mutation_mapping[(str(i), char)] += 1\n #if char != '0':\n # if char == \"-\":\n # character_mutation_mapping[(str(i), char)] -= 1\n # else:\n # character_mutation_mapping[(str(i), char)] += 1\n\n\t# Choosing the best mutation to split on (ie character and state)\n\tcharacter, state = 0, 0\n\tmax_cost = 0\n\n\tmin_prior = 1\n\tif priors:\n\t\tfor i in priors.keys():\n\t\t\tfor j in priors[i].keys():\n\t\t\t\tmin_prior = min(min_prior, priors[i][j])\n\n\tfor i,j in character_mutation_mapping:\n\t\tif not (i,j) in considered:\n\t\t\tif not priors:\n\t\t\t\tif max_cost < character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\t\t\telse:\n\t\t\t\tif j not in priors[int(i)]:\n\t\t\t\t\tpriors[int(i)][j] = min_prior\n\t\t\t\tif max_cost < -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]:\n\t\t\t\t\tmax_cost = -np.log(priors[int(i)][j]) * character_mutation_mapping[(i, j)]\n\t\t\t\t\tcharacter, state = i, j\n\tcharacter = int(character)\n\n\n\t# If there is no good split left, stop the process and return a graph with the remainder of nodes\n\tif character == 0 and state == 0:\n\t\tif len(nodes) == 1:\n\t\t\tG.add_node(nodes[0])\n\t\telse:\n\t\t\tfor i in range(0, len(nodes)):\n\t\t\t\tif nodes[i] != root:\n\t\t\t\t\tG.add_edge(root, nodes[i])\n\t\treturn G, []\n\n\t# Splitting nodes based on whether they have the mutation, don't have the mutation, or are NA('-') in that character\n\t# Right split is where nodes with the mutation go, everyone else goes to left split or NA chars\n\tleft_split, right_split, NA_chars = [], [], []\n\tright_split_temp = []\n\tleft_split_temp = []\n\tfor node in nodes:\n\t\tnode_list = node.split('|')\n\t\tif node_list[character] == state:\n\t\t\tright_split.append(node)\n\t\telif node_list[character] == '-':\n\t\t\tNA_chars.append(node)\n\t\telse:\n\t\t\tleft_split.append(node)\n\n\n\t# Seperates all nodes with NA in the character chosen to be split upon\n\t# Puts in right split or left split based on which list shares more mutated characters with this string\n\tfor node in NA_chars:\n\t\tright_split_score = 0\n\t\tleft_split_score = 0\n\t\tnode_list = node.split('|')\n\t\tnum_not_missing = len([n for n in node_list if n != \"-\"])\n\t\tfor i in range(0, len(node_list)):\n\t\t\tif node_list[i] != '0' and node_list[i] != '-':\n\t\t\t\tfor node_2 in left_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tleft_split_score += 1\n\t\t\t\tfor node_2 in right_split:\n\t\t\t\t\tnode2_list = node_2.split('|')\n\t\t\t\t\tif node_list[i] == node2_list[i]:\n\t\t\t\t\t\tright_split_score += 1\n\n\t\tavg_left_split_score = left_split_score / 
float(len(left_split) * num_not_missing + 1)\n\t\tavg_right_split_score = right_split_score / float(len(right_split) * num_not_missing + 1)\n\n\t\tif avg_left_split_score < avg_right_split_score:\n\t\t\tright_split_temp.append(node)\n\t\telse:\n\t\t\tleft_split_temp.append(node)\n\n\tright_split += right_split_temp\n\tleft_split += left_split_temp\n\n\t# Add character, state that split occurred to already considered mutations\n\tconsidered.add((str(character), state))\n\tG = nx.DiGraph()\n\t#splitter = str(character) + \" \" + str(state) + \" (\" + uniq + \")\"\n\tsplitter = root\n\n\t# Recursively build left side of network (ie side that did not mutation at the character with the specific state)\n\tG.add_node(splitter)\n\tleft_subproblems = []\n\tleft_network = None\n\tif len(left_split) != 0:\n\t\tleft_root = root_finder(left_split)\n\t\t# if left_root not in left_split and left_root in targets:\n\t\t# \tleft_root = left_root + \"_unique\"\n\n\t\tleft_network, left_subproblems = greedy_build(left_split, priors, cutoff, considered.copy(), uniq + \"0\", targets=targets)\n\n\t\tleft_nodes = [node for node in left_network.nodes() if left_network.in_degree(node) == 0]\n\t\tdup_dict = {}\n\t\tfor n in left_network:\n\t\t\tif n in list(G.nodes()) and n != left_root:\n\t\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(left_root.encode('utf-8')).hexdigest())\n\t\tleft_network = nx.relabel_nodes(left_network, dup_dict)\n\t\tG = nx.compose(G, left_network)\n\t\tif root != left_root:\n\t\t\tG.add_edge(splitter, left_root, weight=0, label=\"None\")\n\n\t# Recursively build right side of network\n\tright_network, right_subproblems = greedy_build(right_split, priors, cutoff, considered.copy(), uniq + \"1\", targets=targets)\n\tright_nodes = [node for node in right_network.nodes() if right_network.in_degree(node) == 0]\n\tright_root = root_finder(right_split)\n\n\tdup_dict = {}\n\tfor n in right_network:\n\t\tif n in list(G.nodes()) and n != right_root:\n\t\t\tdup_dict[n] = n + \"_\" + str(hashlib.md5(right_root.encode('utf-8')).hexdigest())\n\tfor n in dup_dict:\n\t\trename_dict = {n: dup_dict[n]}\n\t\tif right_network.out_degree(n) != 0:\n\t\t\tright_network = nx.relabel_nodes(right_network, rename_dict)\n\t\telse:\n\t\t\trename_dict = {n: dup_dict[n]}\n\t\t\tG = nx.relabel_nodes(G, rename_dict)\n\n\tG = nx.compose(G, right_network)\n\t# if right_root not in right_split and right_root in targets:\n\t# \tright_root = right_root + \"_unique\"\n\t#for node in right_nodes:\n\tif root != right_root:\n\t\tif not priors:\n\t\t\tG.add_edge(splitter, right_root, weight=1, label = str(character) + \": 0 -> \" + str(state))\n\t\telse:\n\t\t\tG.add_edge(splitter, right_root, weight=-np.log(priors[int(character)][state]), label=str(character) + \": 0 -> \" + str(state))\n\n\n\treturn G, left_subproblems + right_subproblems", "def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n j=self.research_index(self.nodes,x_min)\n 
self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def initialisation(Rsize, config, n_global_in, n_global_out, ke):\n # Creating population of Rsize*Rsize new random individuals\n # population = [[Individual(config, n_global_in, n_global_out)]*Rsize for _ in range(Rsize)]\n reef = [Individual(config, n_global_in, n_global_out) for _ in range(Rsize * Rsize)]\n print \"Reef created with \" + str(len(reef)) + \" solutions\"\n print \"Original size: \" + str(len(reef))\n\n # Eval population\n\n reef, count_evaluations = eval_population(reef, ke)\n # for ind in reef:\n # print str(ind.fitness)\n\n # Calculating fitness mean and std deviation\n fitness = fitness_mean_std(reef)\n\n fitness_mean_validation = fitness[\"validation\"][\"mean\"]\n fitness_std_validation = fitness[\"validation\"][\"std\"]\n fitness_max_validation = fitness[\"validation\"][\"max\"]\n fitness_min_validation = fitness[\"validation\"][\"min\"]\n\n # Deleting corals according to formula\n # It is not the same that the depredation one\n # new_population = [[ind if initial_deletion_check(ind.fitness, fitness_mean, fitness_std) else None for ind in line ] for line in population]\n new_reef = [\n ind if initial_deletion_check(ind.fitness[\"accuracy_validation\"], fitness_mean_validation, fitness_std_validation) else None for\n ind in reef]\n\n print \"Population reduced to: \" + str(len(filter(lambda w: w is not None, new_reef))) + \" solutions\"\n\n # for ind in filter(lambda w: w is not None, new_reef):\n # print str(ind.fitness)\n\n return new_reef", "def survivors_selection(self):\n q = 5\n new_population = []\n for i in range(self._population_size):\n batch = []\n for j in range(q):\n r = random.randint(0, (self._child2population_ratio + 1) * self._population_size - 1)\n if r < self._population_size:\n batch.append(self._population[r])\n else:\n batch.append(self._children[r - self._population_size])\n new_population.append(self.select_best(batch))\n\n self._population = new_population", "def _mutate(self, tree, spread, dtype):\n\n # defines wrapper functions\n def uniform(lower, upper):\n \"\"\"\n Draws a random float number from a uniform distribution\n given by U[lower, upper].\n \"\"\"\n\n return lower + random.random() * (upper - lower)\n\n def normal(mean, std):\n \"\"\"\n Draws a random float number from a normal distribution\n with mean 'mu' and standard deviation 'sigma': N[mu, sigma].\n \"\"\"\n\n return random.gauss(mean, std)\n\n # creates a seedling based on the DNA of its mother tree\n new_tree = copy.deepcopy(tree)\n\n # trade-off 
between exploitation and exploration\n if (random.random() > self.epsilon):\n\n # mutates initial solution vector - i.e. local seeding\n for i in range(self.dim):\n if (random.random() < self.mut_proba):\n if (dtype == \"normal\"):\n new_tree.vector[i] += normal(0, spread)\n\n elif (dtype == \"uniform\"):\n new_tree.vector[i] += uniform(-1, 1)\n\n else:\n raise AttributeError(\"'dtype' must either be 'normal' or 'uniform'.\")\n\n else:\n\n # explores new region of the search space - i.e. global seeding\n new_tree = Tree(self.lower, self.upper)\n\n return new_tree", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def train(eps, ntrees, min_size, max_splits, nfeats_test, resample=True):\n # TODO your code here\n trees = []\n for _ in range(ntrees):\n # repeatedly add values from the list of expression profiles without removal to a set\n # (so there could be duplicate expression profiles in the set we are creating) until the size of the set\n # is equal to the size of the original list of profiles\n if resample:\n resampled_eps = []\n for _ in range(len(eps)):\n idx = random.randint(0, len(eps) - 1)\n resampled_eps.append(eps[idx])\n trees.append(\n ExpressionDecisionTree.train(resampled_eps, len(resampled_eps), min_size, max_splits, nfeats_test))\n else:\n trees.append(\n ExpressionDecisionTree.train(eps, len(eps), min_size, max_splits, nfeats_test))\n return ExpressionRandomForest(trees)", 
"def repair(self, population_size=POPULATION_SIZE, iterations=100):\n self.validate()\n\n population = self.initial_population(population_size)\n\n last_key = ast.dump(self.target_tree)\n\n for iteration in range(iterations):\n population = self.evolve(population)\n\n best_tree = population[0]\n fitness = self.fitness(best_tree)\n\n if self.log:\n print(f\"Evolving population: \"\n f\"iteration{iteration:4}/{iterations} \"\n f\"fitness = {fitness:.5} \\r\", end=\"\")\n\n if self.log >= 2:\n best_key = ast.dump(best_tree)\n if best_key != last_key:\n print()\n print()\n self.log_tree(f\"New best code (fitness = {fitness}):\",\n best_tree)\n last_key = best_key\n\n if fitness >= 1.0:\n break\n\n if self.log:\n print()\n\n if self.log and self.log < 2:\n self.log_tree(f\"Best code (fitness = {fitness}):\", best_tree)\n\n best_tree = self.reduce(best_tree)\n fitness = self.fitness(best_tree)\n\n self.log_tree(f\"Reduced code (fitness = {fitness}):\", best_tree)\n\n return best_tree, fitness", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def build_random_trees(rows, n_features, max_depth, min_size, n_trees, random_dataset_size):\n trees = []\n for tree_number in range(n_trees):\n print(\"Building tree number:\", tree_number, \"of\", n_trees)\n # Select random dataset from original dataset\n random_dataset = select_random_rows(rows, random_dataset_size)\n\n # Select random features (columns)\n random_features = []\n for random_feature in range (n_features):\n # generate random index number to pick column\n random_column = randrange(len(rows))\n random_features.append(random_column)\n # generate the random tree with randomly picked features (columns) and a random dataset\n random_tree = build_single_random_tree(random_dataset, random_features, max_depth, min_size, 1)\n # add to list of trees\n trees.append(random_tree)\n return trees", "def run(self, verbose=False):\n\n cost = {}; cost[\"best\"] = []; cost[\"mean\"] = []\n for i in range(self.max_iters):\n\n # prints out information at current cycle\n if verbose:\n print(\"Iteration: {}\".format(i),\n \"Fitness: {}\".format(self.forest[0][0]))\n\n # reproduction phase\n self.reproduce()\n\n # seed dispersal phase\n self.seedlings = []\n for tree in self.population:\n self.disperse(tree[1])\n tree[1].year += 1\n\n # selection phase\n self.select()\n\n # decays exploration parameters\n if (self.epsilon > 0):\n self.epsilon -= self.epsilon_decay\n\n # stores statistics and updates counter of iterations\n cost[\"best\"].append(self.population[0][0])\n cost[\"mean\"].append( sum( [ tree[0] for tree in self.population ] )\\\n / len(self.population) )\n self.iteration += 1\n\n return cost", "def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # while the termination criteria is not satisfied, makes another generation\n while not self.termination_criteria.satisfied(self.generation, time.time()-start_time, self.population):\n self.generation += 1\n #print str(self.generation)\n next_generation = []\n\n if self.elitism:\n # Keeps the 10% best 
individuals\n best_individuals = heapq.nsmallest(int(0.1*self.population_size), self.population, lambda individual: individual.get_fitness())\n next_generation += copy.deepcopy(best_individuals)\n\n # select genetic operation probabilistically\n # this is a roulette wheel selection\n operations = numpy.random.choice(['reproduction', 'crossover', 'mutation'], size=self.population_size, p=[self.reproduction, self.crossover, self.mutation]).tolist()\n individuals = numpy.random.choice(self.population, p=self.normalized_fitness, size=2*self.population_size, replace=True).tolist()\n\n while len(next_generation) < self.population_size:\n operation = operations.pop()\n individual = individuals.pop()\n individual.get_fitness() # enforce fitness calculation\n\n if operation == 'reproduction':\n next_generation.append(individual)\n elif operation == 'crossover':\n individual2 = individuals.pop()\n individual2.get_fitness() # enforce fitness calculation\n individual1, individual2 = individual.crossover(individual2)\n next_generation.append(individual1)\n next_generation.append(individual2)\n elif operation == 'mutation':\n individual1 = individual.mutate()\n next_generation.append(individual1)\n\n self.population = next_generation\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n\n mean = numpy.mean(self.population_fitness)\n std = numpy.std(self.population_fitness)\n min = self.population_fitness.min()\n\n info_mean = pandas.DataFrame([[self.generation, mean, min, std]], columns=[\"generation\", \"mean\", \"min\", \"std\"])\n self.generation_info = self.generation_info.append(info_mean, ignore_index=True)", "def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum = numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. 
Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = 
\"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def __call__(self, s, n=1000):\n\n root = StateNode(None, s, self.game)\n \n if root.parent is not None:\n raise ValueError(\"Root's parent must be None.\")\n \n for _ in range(n):\n #selection\n node = _get_next_node(root, self.tree_policy)\n #simulation\n node.reward = self.default_policy(node)\n #print(node.reward)\n #back\n self.backup(node)\n \n root.reset(copy.deepcopy(self.game_bak))\n \n #for i in root.children:\n # print(root.children[i].__dict__)\n # for j in root.children[i].children:\n # print(root.children[i].children[j].__dict__)\n # print(\"=======\")\n return rand_max(root.children.values(), key=lambda x: x.q).action, rand_max(root.children.values(), key=lambda x: x.q).q", "def evolve(population, targetSum, targetProduct, retain=0.2, random_select=0.05, mutate=0.01):\n\n graded = [ ( fitness(x, targetSum,targetProduct), x ) for x in population]\n graded = [ x[1] for x in sorted(graded) ]\n retain_length = int(len(graded) * retain)\n parents = graded[:retain_length]\n\n # randomly add other individuals to promote genetic\n # diversity\n for individual in graded[retain_length:]:\n if random_select > random.random():\n parents.append(individual)\n\n # crossover parents to create offspring\n #print(\"starting on crossover\")\n desired_length = len(population) - len(parents)\n children = []\n while len(children) < desired_length:\n male = randint(0, len(parents) - 1)\n female = randint(0, len(parents) -1)\n if male != female:\n male = parents[male]\n female = parents[female]\n half = int(len(male) / 2)\n child = male[: half] + female[half:]\n children.append(child)\n\n # mutate some individuals\n #print(\"starting on mutation\")\n for individual in children:\n if mutate > random.random():\n half = int(len(individual) / 2 )\n pos_geneSum = randint(0, (half - 1))\n pos_geneProd = randint(half, (len(individual) - 1))\n tmp = individual[pos_geneSum]\n individual[pos_geneSum] = individual[pos_geneProd]\n individual[pos_geneProd] = tmp\n\n parents.extend(children)\n return parents", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = 
list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def make_parents(self):\r\n self.parents = []\r\n \r\n for loopindex in range(0, int(self.population_size * 0.6)):\r\n while True:\r\n if loopindex < int(self.population_size * 6 / 15):\r\n parent = random.choice(self.best_districts)\r\n else:\r\n parent = random.choice(self.worst_districts)\r\n \r\n if parent not in self.parents:\r\n self.parents.append(parent)\r\n break", "def mutate(offspring, individuals, params, *args):\n\n prob_mut = params.get(\"prob_mutation\", 0.3)\n prob_stand = 1 / 3 * prob_mut\n prob_point = 1 / 3 * prob_mut\n prob_mono = prob_mut - prob_stand - prob_point\n prob_replace = prob_mut\n r = np.random.rand()\n\n for ind in offspring:\n if r <= prob_stand:\n # Standard mutation\n #\n # This picks a random subtree anywhere within the tree\n rand_node = choice(ind.nodes[1:])\n tree = ind.grow_tree(method=\"grow\", depth=rand_node.depth, ind=rand_node)\n rand_node.value = tree.value\n rand_node.roots = tree.roots\n\n # This picks a whole subtree at depth=1 under the linear node\n # rand_subtree = np.random.randint(len(ind.roots))\n # del ind.roots[rand_subtree]\n # ind.grow_tree(method=\"grow\", ind=ind)\n\n ind.nodes = ind.get_sub_nodes()\n\n elif r <= prob_point + prob_stand:\n # Small mutation\n for node in ind.nodes[1:]:\n if np.random.rand() < prob_replace and callable(node.value):\n value = choice(node.function_set)\n while node.value.__code__.co_argcount != value.__code__.co_argcount:\n value = choice(node.function_set)\n node.value = value\n elif np.random.rand() < prob_replace:\n node.value = choice(node.terminal_set)\n ind.nodes = ind.get_sub_nodes()\n\n elif r <= prob_mono + prob_point + prob_stand:\n # Mono parental\n swap_nodes = sample(ind.nodes[1:], 2)\n tmp_value = swap_nodes[0].value\n tmp_roots = swap_nodes[0].roots\n swap_nodes[0].value = swap_nodes[1].value\n swap_nodes[0].roots = swap_nodes[1].roots\n swap_nodes[1].value = 
tmp_value\n swap_nodes[1].roots = tmp_roots\n ind.nodes = ind.get_sub_nodes()\n\n else:\n pass", "def evolve(self, elitism='on', save='off', probability=0.05, rate=0.05):\n if self.state == 'dead':\n\n self.member_fitness = [self.members[i].fitness for i in range(self.size)]\n\n self.fittest_brain = self.members[self.member_fitness.index(max(self.member_fitness))]\n\n if save == 'on':\n self.fittest_brain.save_as('fittest_brain')\n\n self.total_population_fitness = sum(self.member_fitness)\n\n print('Total population fitness is %s' % (self.total_population_fitness))\n\n self.mating_pool = [[self.members[i]] * round(self.member_fitness[i] * 1000 / self.total_population_fitness) for i in range(self.size)]\n\n self.mating_pool = [brain for sublist in self.mating_pool for brain in sublist]\n\n self.children = []\n\n if elitism == 'on':\n\n self.children.append(self.fittest_brain)\n\n for i in range(self.size - 1):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n else:\n for i in range(self.size):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n\n self.members = self.children\n\n self.members[0].state = 'alive'\n\n self.state = 'alive'\n self.generation += 1\n\n else:\n print('Cannot evolve: some members are still alive')", "def fit(self, X:np.ndarray, e=0, improved=False):\n if e>=self.height_limit or len(X)<=1:\n self.n_nodes = self.n_nodes + 1\n return Tree(X,None,None,None,None,'ex')\n else:\n Q = np.arange(X.shape[1], dtype='int')\n q = np.random.choice(Q)\n q_min = X[:,q].min()\n q_max = X[:,q].max()\n if improved:\n p_list = np.random.uniform(q_min,q_max,5)\n best_p = q_max\n x_len = len(X)\n for p in p_list:\n X_left = X[np.where(X[:,q] < p)]\n X_right = X[np.where(X[:,q] >= p)]\n if min(len(X_left), len(X_right))<=5:\n best_p = p\n break\n if min(len(X_left), len(X_right))<x_len:\n best_p = p\n else:\n best_p = np.random.uniform(q_min,q_max)\n X_left = X[np.where(X[:,q] < best_p)]\n X_right = X[np.where(X[:,q] >= best_p)]\n self.n_nodes = self.n_nodes + 1\n self.root = Tree(None,q, best_p, self.fit(X_left,e+1), self.fit(X_right,e+1), 'in')\n return self.root", "def fill(self):\n # Fail fast if num_classes or num_features isn't set.\n _ = getattr(self, 'num_classes')\n _ = getattr(self, 'num_features')\n\n self.training_library_base_dir = getattr(\n self, 'training_library_base_dir', '')\n self.inference_library_base_dir = getattr(\n self, 'inference_library_base_dir', '')\n\n self.bagged_num_features = int(self.feature_bagging_fraction *\n self.num_features)\n\n self.bagged_features = None\n if self.feature_bagging_fraction < 1.0:\n self.bagged_features = [random.sample(\n range(self.num_features),\n self.bagged_num_features) for _ in range(self.num_trees)]\n\n self.regression = getattr(self, 'regression', False)\n\n # Num_outputs is the actual number of outputs (a single prediction for\n # classification, a N-dimenensional point for regression).\n self.num_outputs = self.num_classes if self.regression else 1\n\n # Add an extra column to classes for storing counts, which is needed for\n # regression and avoids having to recompute sums for classification.\n self.num_output_columns = self.num_classes + 1\n\n # Allow each tree to be unbalanced by up to a factor of 2.\n self.max_depth = (self.max_depth or\n 
int(2 * math.ceil(math.log(self.max_nodes, 2))))\n\n # The Random Forest literature recommends sqrt(# features) for\n # classification problems, and p/3 for regression problems.\n # TODO(thomaswc): Consider capping this for large number of features.\n self.num_splits_to_consider = (\n self.num_splits_to_consider or\n max(10, int(math.ceil(math.sqrt(self.num_features)))))\n\n # max_fertile_nodes doesn't effect performance, only training speed.\n # We therefore set it primarily based upon space considerations.\n # Each fertile node takes up num_splits_to_consider times as much\n # as space as a non-fertile node. We want the fertile nodes to in\n # total only take up as much space as the non-fertile nodes, so\n num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))\n # But always use at least 1000 accumulate slots.\n num_fertile = max(num_fertile, 1000)\n self.max_fertile_nodes = self.max_fertile_nodes or num_fertile\n # But it also never needs to be larger than the number of leaves,\n # which is max_nodes / 2.\n self.max_fertile_nodes = min(self.max_fertile_nodes,\n int(math.ceil(self.max_nodes / 2.0)))\n\n # We have num_splits_to_consider slots to fill, and we want to spend\n # approximately split_after_samples samples initializing them.\n num_split_initializiations_per_input = max(1, int(math.floor(\n self.num_splits_to_consider / self.split_after_samples)))\n self.split_initializations_per_input = getattr(\n self, 'split_initializations_per_input',\n num_split_initializiations_per_input)\n\n # If base_random_seed is 0, the current time will be used to seed the\n # random number generators for each tree. If non-zero, the i-th tree\n # will be seeded with base_random_seed + i.\n self.base_random_seed = getattr(self, 'base_random_seed', 0)\n\n return self", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = 
self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def findApproxDepth(train, valid, mD=0, mS=0):\n print(\n \"Building a random set of small trees to geuss the max depth and min set size values\"\n )\n res = []\n tree = DecisionTree(train.randSubSet(120, True))\n r = 10\n s = 3\n if mD != 0:\n s = mD - 1\n r = 1\n for i in range(\n s,\n r + s,\n ):\n depth = i + 1 # depth = randint(2,(len(train[0])-1)*3)\n a = 2\n b = 15\n if mS != 0:\n a = mS\n b = mS + 1\n for min_size in range(a, b, 2):\n # min_size = randint(2,(len(train[0])-1)*2)\n tree.buildTree(depth, min_size, True)\n acc = testTreeF(tree, valid)\n res.append([depth, min_size, acc])\n print(\"%.2f\" % (100 * (i - s + 1) / r), \"percent done\")\n best = max(res, key=lambda r: r[-1])\n # res.sort(key=lambda r: r[-1])\n # for r in res:\n # print(r)\n print(\"found a depth of\", best[0], \"and min size of\", best[1])\n return best", "def get_random_depth_sample(n=8, depths=list(range(2,26,2)), num_samples=100):\n\n def get_states(start):\n frontier = [start]\n frontier_set = {start}\n explored = set()\n\n states = [False for _ in range(len(depths))]\n while not all(states):\n node = frontier.pop(0)\n frontier_set.remove(node)\n explored.add(node)\n\n children = node.get_children()\n\n # It's necessary to shuffle children to get a truly random sample; otherwise, the first child (always\n # produced from the parent by the same action) produced at a certain depth will always be selected,\n # and children produced by other actions will never be selected\n shuffle(children)\n\n for child in children:\n if child not in frontier_set and child not in explored:\n frontier_set.add(child)\n frontier.append(child)\n child.path_cost = node.path_cost+1\n index = depths.index(child.path_cost) if child.path_cost in depths else None\n if index is not None and not states[index]:\n states[index] = {'start': start.sequence, 'end': child.sequence}\n\n return states\n\n depth_sample = [[] for depth in range(len(depths))]\n\n for _ in range(num_samples):\n start = list(range(1,n+2))\n shuffle(start)\n start = PuzzleState(start, path_cost=0)\n\n states = get_states(start)\n print('\\rSet ' + str(_+1) + ' of ' + str(num_samples) + ' complete', end='', flush=True)\n 
list(map(list.append, depth_sample, states))\n\n return depth_sample", "def step(individuals, grammar, replacement, selection, fitness_function, best_ever):\n #Select parents\n parents = selection(individuals)\n #Crossover parents and add to the new population\n new_pop = []\n while len(new_pop) < GENERATION_SIZE:\n new_pop.extend(onepoint_crossover(*random.sample(parents, 2)))\n #Mutate the new population\n new_pop = list(map(int_flip_mutation, new_pop))\n #Evaluate the fitness of the new population\n evaluate_fitness(new_pop, grammar, fitness_function)\n #Replace the sorted individuals with the new populations\n individuals = replacement(new_pop, individuals)\n best_ever = max(best_ever, max(individuals))\n return individuals, best_ever", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def fit_tree_stump_forest(X_train: np.ndarray, y_train: np.ndarray, n_estimators: int) -> RandomForestClassifier:\n clf = RandomForestClassifier(n_estimators=n_estimators)\n clf = clf.fit(X_train, y_train)\n return clf", "def init_population(pop_size):\n population = []\n for md in range(2, MAX_DEPTH - 1):\n for _ in range(int(pop_size / 2)):\n t = Tree()\n t.random_tree(grow=True, max_depth=md) # Grow method \n population.append(t)\n for _ in range(int(pop_size / 2)):\n t = Tree()\n t.random_tree(grow=False, max_depth=md) # Full method\n population.append(t)\n return population", "def evolve(population, target_sum, target_mult, retain=0.2, random_select=0.05, mutate=0.009):\r\n graded = [ (fitness(x, target_sum, target_mult), x) for x in population ]\r\n graded = [ x[1] for x in sorted(graded) ]\r\n retain_length = int(len(graded) * retain)\r\n parents = graded[:retain_length]\r\n # randomly add other individuals to promote genetic # diversity\r\n for individual in graded[retain_length:]:\r\n if random_select > random():\r\n parents.append(individual)\r\n # crossover parents to create offspring\r\n desired_length = len(population) - len(parents)\r\n children = []\r\n while len(children) < desired_length:\r\n male = randint(0, len(parents)-1)\r\n female = randint(0, len(parents)-1)\r\n if male != female:\r\n male = parents[male]\r\n female = parents[female]\r\n half = int(len(male) / 2)\r\n child = male[:half] + female[half:]\r\n children.append(child)\r\n # mutate some individuals for individual in children:\r\n for individual in children:\r\n if mutate > random():\r\n pos_to_mutate = randint(0, len(individual)-1)\r\n # this mutation is not ideal, because it\r\n # restricts the range of possible values,\r\n # but the function is unaware of the min/max\r\n # values used to create the individuals\r\n individual[pos_to_mutate] = randint(min(individual), max(individual))\r\n parents.extend(children)\r\n return parents", "def main():\n t = []\n for i in range(1, 19):\n t.append(i)\n config = Config()\n config.DEBUG = True\n config['time_list']=t\n config['load_graphs_from_xml']=True\n\n defaults = dict(num_samples=100, max_depth=5, run=0, num_runs=1,num_trees=100, stat='logrank', split_stat='logrank', num_folds=None,exp='flood',\n verbose=True, folds=None, load_graphs_from_xml=True, time_list=t)\n for key, value in defaults.items():\n cur_value = config.get(key, None)\n # print(\"key={0}:cur_value={1}\".format(key,cur_value))\n config[key] = value if cur_value is None else cur_value\n config.DEBUG = True\n #loadExperimentFile(config, filename=experiment_Path, experiment_name=\"flood\")\n #config.parseOpts()\n print('Start Grow Forest')\n 
growForest(config)", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = max(differences)\n return best", "def run(self):\n values_to_set = self._load().get_initial_values()\n\n best_data = []\n worst_data = []\n found = False\n overall_nb_generations_done = 0\n restart_counter = 0\n\n while overall_nb_generations_done < self._max_nb_generations and not found:\n new_population = ga_utils.create_generation(self._population_size, values_to_set)\n\n nb_generations_done = 0\n remember_the_best = 0\n nb_generations_without_improvement = 0\n\n # Loop until max allowed generations is reached or a solution is found\n while nb_generations_done < self._max_nb_generations and not found:\n # Rank the solutions\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n best_score = best_solution.fitness()\n worst_score = ranked_population[-1].fitness()\n best_data.append(best_score)\n worst_data.append(worst_score)\n\n # Manage best value and improvements among new generations over time\n if remember_the_best == best_score:\n nb_generations_without_improvement += 1\n else:\n remember_the_best = best_score\n if 0 < self._restart_after_n_generations_without_improvement < nb_generations_without_improvement:\n print(\"No improvement since {} generations, restarting the program\".\n format(self._restart_after_n_generations_without_improvement))\n restart_counter += 1\n break\n\n # Check if problem is solved and print best and worst results\n if best_score > 0:\n print(\"Problem not solved on generation {} (restarted {} times). Best solution score is {} and \"\n \"worst is {}\".format(nb_generations_done, restart_counter, best_score, worst_score))\n # Not solved => select a new generation among this ranked population\n # Retain only the percentage specified by selection rate\n next_breeders = ga_utils.pick_from_population(ranked_population, self._selection_rate,\n self._random_selection_rate)\n\n children = ga_utils.create_children_random_parents(next_breeders, self._nb_children)\n new_population = ga_utils.mutate_population(children, self._mutation_rate)\n\n nb_generations_done += 1\n overall_nb_generations_done += 1\n else:\n print(\"Problem solved after {} generations ({} overall generations)!!! Solution found is:\".\n format(nb_generations_done, overall_nb_generations_done))\n best_solution.display()\n found = True\n print(\"It took {} to solve it\".format(tools.get_human_readable_time(self._start_time, time())))\n\n if not found:\n print(\"Problem not solved after {} generations. 
Printing best and worst results below\".\n format(overall_nb_generations_done))\n ranked_population = ga_utils.rank_population(new_population)\n best_solution = ranked_population[0]\n worst_solution = ranked_population[-1]\n print(\"Best is:\")\n best_solution.display()\n print(\"Worst is:\")\n worst_solution.display()\n\n graphics.draw_best_worst_fitness_scores(best_data, worst_data)", "def initial_population(self, size):\n return [self.target_tree] + \\\n [self.mutator.mutate(copy.deepcopy(self.target_tree))\n for i in range(size - 1)]", "def start_neuroevolution(x, y, x_test, y_test):\n\n connections = [(0, INPUT0, OUTPUT0), (1, INPUT1, OUTPUT0), (2, INPUT0, OUTPUT1), (3, INPUT1, OUTPUT1)]\n genotypes = [{0: True, 1: True, 2: True, 3: True} for d in xrange(5)]\n\n for its in xrange(0,5):\n print \"iteration\", its\n\n fitnesses = []\n # test networks\n for i in xrange(0,len(genotypes)):\n fitnesses.append(eval_fitness(connections, genotypes[i], x, y, x_test, y_test, run_id=str(its) + \"/\" + str(i)))\n\n # get indices of sorted list\n fitnesses_sorted_indices = [i[0] for i in reversed(sorted(enumerate(fitnesses), key=lambda x: x[1]))]\n\n print \"connections:\\n\"\n print connections\n for ra in xrange(0,len(fitnesses_sorted_indices)):\n print fitnesses[fitnesses_sorted_indices[ra]], genotypes[fitnesses_sorted_indices[ra]]\n\n # run evolutions\n # todo: fiddle with parameters, include size of network in fitness?\n new_gen = []\n # copy five best survivors already\n m = 5\n if m > len(fitnesses):\n m = len(fitnesses)\n\n for i in xrange(0,m):\n print \"adding:\", fitnesses[fitnesses_sorted_indices[i]], genotypes[fitnesses_sorted_indices[i]]\n new_gen.append(genotypes[fitnesses_sorted_indices[i]])\n\n for i in xrange(0,len(fitnesses_sorted_indices)):\n fi = fitnesses_sorted_indices[i]\n r = np.random.uniform()\n # select the best for mutation and breeding, kill of worst.\n if r <= 0.2:\n # mutate\n connections, gen = add_connection(connections, genotypes[i])\n new_gen.append(gen)\n r = np.random.uniform()\n if r <= 0.5:\n connections, gen = add_node(connections, genotypes[i])\n new_gen.append(gen)\n\n r = np.random.uniform()\n if r <= 0.1:\n # select random for breeding\n r = np.random.randint(0,len(fitnesses))\n r2 = np.random.randint(0,len(fitnesses) - 1)\n if r2 >= r:\n r2 +=1\n gen = crossover(connections, genotypes[r], fitnesses[r], genotypes[r2], fitnesses[r2])\n new_gen.append(gen)\n new_gen.append(genotypes[fi])\n # stop if we have 5 candidates\n if len(new_gen) > 10:\n break\n genotypes = new_gen", "def generate_students(G, school_type,N_classes,class_size,p_children,p_parents):\n\tage_bracket = get_age_bracket(school_type)\n\t# mapping of classes to ages\n\tage_bracket_map = get_age_distribution(school_type, N_classes)\n\n\t# number of students of every age group required to fill all classes of \n\t# the school\n\tN_target_students = {age:0 for age in age_bracket_map.values()}\n\tfor age in age_bracket_map.values():\n\t\tN_target_students[age] += class_size \n\n\tN_current_students = {i:0 for i in age_bracket}\n\tstudent_counter = 1\n\tfamily_counter = 1\n\tfamily_member_counter = 1\n\n\t# generate students and their families until the school is full\n\twhile (np.asarray([N_target_students[age] for age in age_bracket]) - \\\n\t\tnp.asarray([N_current_students[age] for age in age_bracket])).sum() > 0:\n\n\n\t\tages, N_parents = generate_student_family(school_type, p_children,\n\t\t\t\t\t\t\t\t\t\t\t\t p_parents)\n\n\t\t# Keep the family if at least one of the children fits into 
the school. \n\t\t# Else the family has to be discarded and a new one created.\n\t\tfits_in_school = []\n\t\tdoesnt_fit = []\n\t\tstudent_nodes = []\n\t\tfamily_nodes = []\n\t\tfor age in ages:\n\t\t\t# there is room for a student with the given age in the school ->\n\t\t\t# add the node to the graph as student\n\t\t\tif age in age_bracket and \\\n\t\t\t N_current_students[age] < N_target_students[age]:\n\n\t\t\t\t# Note: student IDs are created here with a big \"S\" at first.\n\t\t\t\t# Later on (in the function assign_classes()), students will\n\t\t\t\t# be assigned to classes and student node IDs relabelled with\n\t\t\t\t# the final small \"s\" such that s1 is the first student of the\n\t\t\t\t# first class and sN is the last student in the last class.\n\t\t\t\tstudent_ID = 'S{:04d}'.format(student_counter)\n\t\t\t\tG.add_node(student_ID)\n\t\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{student_ID:{'type':'student',\n\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t 'family':family_counter}})\n\t\t\t\tstudent_counter += 1\n\t\t\t\tfits_in_school.append(age)\n\t\t\t\tstudent_nodes.append(student_ID)\n\t\t\t\tN_current_students[age] += 1\n\t\t\telse:\n\t\t\t\tdoesnt_fit.append(age)\n\n\t\t# at least one of the children did fit into the school:\n\t\tif len(fits_in_school) > 0:\n\t\t\t# add the students that didn't fit into the school as family members\n\t\t\tfor age in doesnt_fit:\n\t\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\t\tG.add_node(family_member_ID)\n\t\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\tfamily_nodes.append(family_member_ID)\n\t\t\t\tfamily_member_counter += 1\n\n\t\t\t# parents\n\t\t\tfor parent in range(N_parents):\n\t\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\t\tG.add_node(family_member_ID)\n\t\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t\t# Note: 20.5 is the age at which\n\t\t\t\t\t\t\t\t\t\t\t# the symptom and transmission risk\n\t\t\t\t\t\t\t\t\t\t\t# is that of an adult\n\t\t\t\t\t\t\t\t\t\t 'age':20.5,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\tfamily_member_counter += 1\n\t\t\t\tfamily_nodes.append(family_member_ID)\n\n\t\t\t# increase the family counter by one\n\t\t\tfamily_counter += 1\n\n\treturn family_member_counter, family_counter", "def generational_replacement(random, population, parents, offspring, args):\r\n num_elites = args.setdefault('num_elites', 0)\r\n population.sort(reverse=True)\r\n offspring.extend(population[:num_elites])\r\n offspring.sort(reverse=True)\r\n survivors = offspring[:len(population)]\r\n return survivors", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 
0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def create_trees(self, importance_values: List[int]) -> None:\n target_names = [\"Fail\", \"Pass\"]\n trees = defaultdict(list)\n for importance in importance_values:\n for i in range(7):\n print(f'making tree for week {i + 1} with importance {importance}')\n x_train, x_test, y_train, y_test = self.prep_tree_data(i + 1)\n tree = TreeClassifier(x_train, x_test, y_train, y_test, target_names, importance)\n tree.run_model()\n trees[importance].append(tree)\n\n self.trees = trees", "def _build_trees(tree, forest, X, Y, sample_weight, tree_idx, n_trees,\n n_samples_bootstrap=None):\n # Initialize the number of samples input data\n n_samples = X.shape[0]\n\n # If the samples are drawn with replacement, then,\n # weight the sample weights by the number of times\n # that each sample appears on the indexes\n if forest.bootstrap:\n # Check the sample weights, initializing them to an\n # uniform distribution if they are not provided and,\n # if provided, copying them to properly weight the\n # samples according to the bootstrap indexes\n if sample_weight is None:\n curr_sample_weight = np.ones(n_samples, dtype=np.float64)\n else:\n curr_sample_weight = np.array(sample_weight, dtype=np.float64)\n # Obtain the sample weights\n # from to the bootstrap indexes\n indexes = _generate_sample_indexes(tree.random_state, n_samples,\n n_samples_bootstrap)\n sample_counts = np.bincount(indexes, minlength=n_samples)\n curr_sample_weight *= sample_counts\n # Fit the estimator using the sample weight\n # obtained from the bootstrap indexes\n tree.fit(X, Y, curr_sample_weight)\n # Otherwise, directly use the sample\n # weight provided in the fit method\n else:\n tree.fit(X, Y, sample_weight)\n\n # Return the built tree\n return tree", "def step(self, particles, best_state, best_fitness, run_locals):\r\n # continuous testing of inputs\r\n if self.testing_unit.testing_level > 1 and not self.testing_unit.c_test_step_inp(particles,\r\n best_state,\r\n best_fitness,\r\n run_locals):\r\n raise ValueError(\"step won't run, input's aren't valid.\")\r\n # apply the fitness function to get this generations fitness values\r\n fitness = np.empty((particles.shape[0]))\r\n #fitness = np.apply_along_axis(run_locals[\"fitness_function\"], 0, particles[:, 0, :, :]) # hopefully works\r\n for i in range(particles.shape[0]):\r\n fitness[i] = run_locals[\"fitness_function\"](particles[i, 0])\r\n\r\n # find any personal improvements\r\n better = best_fitness < fitness\r\n # set them\r\n best_fitness[better] = fitness[better]\r\n # set their 
states\r\n best_state[better] = particles[better, 0]\r\n\r\n # find highest of group\r\n best_of_group = np.argmax(best_fitness, axis=0)\r\n\r\n if self.verbosity > 6: # some random high verbosity outputs that were once used for debugging, might give ideas\r\n print(\"step high verb: \")\r\n print(particles[0])\r\n print(particles[:, 1].shape)\r\n print(best_state.shape)\r\n print(np.repeat(best_state[best_of_group][np.newaxis, :], particles[:, 1].shape[0], axis=0).shape)\r\n\r\n # run calculation for the velocity calculation\r\n # Maurice Clerc. Standard Particle Swarm Optimisation. 2012. hal-00764996\r\n particles[:, 1] = (run_locals[\"PSO_VELOCITY_WEIGHT\"] * particles[:, 1] +\r\n run_locals[\"PSO_INDIVIDUAL_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state - particles[:, 0]) +\r\n run_locals[\"PSO_GROUP_WEIGHT\"] * np.random.rand(particles[:, 0].shape[0],\r\n particles[:, 0].shape[1],\r\n particles[:, 0].shape[2]) *\r\n (best_state[best_of_group] - particles[:, 0]))\r\n\r\n # run calculation for point calculation\r\n particles[:, 0] = particles[:, 0] + particles[:, 1]\r\n #if True and ((particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0]).any() or \\\r\n # (particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1]).any()):\r\n #print(particles[:, 0].shape)\r\n #mask = np.logical_or(particles[:, 0] < np.array(run_locals[\"axes\"])[:, 0],\r\n # particles[:, 0] > np.array(run_locals[\"axes\"])[:, 1])\r\n #print(particles.shape)\r\n #print(np.arange(particles.shape[0]).shape)\r\n #print(np.arange(particles.shape[0])[mask])\r\n #print(particles[np.argmax(mask), 1])\r\n # clip the particles to be within the axes\r\n particles[:, 0] = np.clip(particles[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 0],\r\n np.array(run_locals[\"axes\"])[:, 1])\r\n #if self.globi < 10:\r\n # self.glob[self.globi] = particles[0, 0, 0, 0]\r\n # self.guub[self.globi] = particles[0, 1, 0, 0]\r\n # self.glub[self.globi] = best_state[best_of_group][0, 0]\r\n # self.globi += 1\r\n #else:\r\n #print(self.glob[:10])\r\n #print(self.guub[:10])\r\n #print(self.glub[:10])\r\n #raise ValueError(self.glob)\r\n\r\n return particles, best_state, best_fitness", "def nsga_replacement(random, population, parents, offspring, args):\n survivors = []\n combined = list(population)\n combined.extend(offspring)\n \n # Perform the non-dominated sorting to determine the fronts.\n fronts = []\n pop = set(range(len(combined)))\n while len(pop) > 0:\n front = []\n for p in pop:\n dominated = False\n for q in pop:\n if combined[p] < combined[q]:\n dominated = True\n break\n if not dominated:\n front.append(p)\n fronts.append([dict(individual=combined[f], index=f) for f in front])\n pop = pop - set(front)\n \n # Go through each front and add all the elements until doing so\n # would put you above the population limit. At that point, fall\n # back to the crowding distance to determine who to put into the\n # next population. 
Individuals with higher crowding distances\n # (i.e., more distance between neighbors) are preferred.\n for i, front in enumerate(fronts):\n if len(survivors) + len(front) > len(population):\n # Determine the crowding distance.\n distance = [0 for _ in range(len(combined))]\n individuals = list(front)\n num_individuals = len(individuals)\n num_objectives = len(individuals[0]['individual'].fitness)\n for obj in range(num_objectives):\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\n distance[individuals[0]['index']] = float('inf')\n distance[individuals[-1]['index']] = float('inf')\n for i in range(1, num_individuals-1):\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \n (individuals[i+1]['individual'].fitness[obj] - \n individuals[i-1]['individual'].fitness[obj]))\n \n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\n crowd.sort(key=lambda x: x['dist'], reverse=True)\n last_rank = [combined[c['index']] for c in crowd]\n r = 0\n num_added = 0\n num_left_to_add = len(population) - len(survivors)\n while r < len(last_rank) and num_added < num_left_to_add:\n if last_rank[r] not in survivors:\n survivors.append(last_rank[r])\n num_added += 1\n r += 1\n # If we've filled out our survivor list, then stop.\n # Otherwise, process the next front in the list.\n if len(survivors) == len(population):\n break\n else:\n for f in front:\n if f['individual'] not in survivors:\n survivors.append(f['individual'])\n return survivors", "def __init__(self, X_init: np.ndarray, Y_init: np.ndarray, num_trees: int = 30,\n do_bootstrapping: bool = True, n_points_per_tree: int = 0, seed: int = None) -> None:\n super().__init__()\n\n # Set random number generator for the random forest\n if seed is None:\n seed = np.random.randint(10000)\n self.reg_rng = reg.default_random_engine(seed)\n\n self.n_points_per_tree = n_points_per_tree\n\n self.rf = reg.binary_rss_forest()\n self.rf.options.num_trees = num_trees\n\n self.rf.options.do_bootstrapping = do_bootstrapping\n\n self.rf.options.num_data_points_per_tree = n_points_per_tree\n\n self._X = X_init\n self._Y = Y_init\n\n if self.n_points_per_tree == 0:\n self.rf.options.num_data_points_per_tree = X_init.shape[0]\n\n data = reg.default_data_container(self._X.shape[1])\n\n for row_X, row_y in zip(X_init, Y_init):\n data.add_data_point(row_X, row_y)\n\n self.rf.fit(data, self.reg_rng)", "def generate_random_walker():\n # must have seeds that generate known problems\n must_have_seeds = [112, 308, 393]\n for seed in must_have_seeds:\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections\n while True:\n seed = random.randint(0, 2**10)\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections", "def findRFBestDepth():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n rforest = 
rforest.fit(cv_data_train, cv_target_train)\n trainng_score += [rforest.score(cv_data_train,cv_target_train)]\n testing_score += [rforest.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth", "def generational_replacement(random, population, parents, offspring, args):\n num_elites = args.setdefault('num_elites', 0)\n population.sort(reverse=True)\n offspring.extend(population[:num_elites])\n offspring.sort(reverse=True)\n survivors = offspring[:len(population)]\n return survivors", "def __init__(self, n_estimators=100, max_depth=2**31-1, learning_rate=0.1, min_samples_split=2,\n min_samples_leaf=1, subsample=1.0, colsample_bytree=1.0, max_bin=225, random_state=None):\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.learning_rate = learning_rate\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.subsample = subsample\n self.colsample_bytree = colsample_bytree\n self.max_bin = max_bin\n self.random_state = random_state\n self.f_0 = None\n self.trees = dict()", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter", "def random_tree(self, grow, max_depth, depth=0):\n # Full method\n if depth < MIN_DEPTH or (depth < MAX_DEPTH and not grow):\n self.body = random.choice(operators)\n elif depth >= MAX_DEPTH:\n self.body = random.choice(terminals)\n else: # intermediate depth, grow\n if random.random() > 0.9:\n self.body = random.choice(operators)\n else:\n self.body = random.choice(terminals)\n if self.body in operators:\n self.left = Tree()\n self.left.random_tree(grow, max_depth, depth=depth + 1)\n self.right = Tree()\n self.right.random_tree(grow, max_depth, depth=depth + 1)", "def minimum_spanning_tree(self):\n if self._directed:\n raise Exception('Current implementation of minimum spanning tree does not work for directed graphs')\n vertices = [self._vertex_dict[x].abstract_vertex for x in self._vertex_dict]\n tree = {'vertices': [random.choice(vertices)], 
'edges': []}\n while len(tree['vertices']) < len(vertices):\n best_edge_number = None\n best_edge = None\n best_vertex = None\n vertex_names = [vertex.label for vertex in tree['vertices']]\n for vertex in tree['vertices']:\n for edge in vertex.edges:\n if edge not in vertex_names and (vertex.edges[edge] < best_edge_number or best_edge is None):\n best_edge_number = vertex.edges[edge]\n best_edge = self._edge_dict[vertex.label, edge]\n best_vertex = edge\n tree['vertices'].append(self._vertex_dict[best_vertex].abstract_vertex)\n tree['edges'].append(best_edge)\n return tree['edges']", "def finetune_depth():\n start_depth = 3\n tol = 10E-4\n best_depth = start_depth\n acc = [-1]\n for i in tqdm(range(20),desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n XGBCla = get_XGBmodel(depth=i+start_depth)\n XGBCla.fit(X_train, y_train)\n pred = XGBCla.predict(X_test)\n acc.append(accuracy_score(y_test, pred))\n if (abs(acc[i]-acc[i+1])<tol):\n break\n if (acc[i]<acc[i+1]):\n best_depth = start_depth + i\n print(\"Accuracy: %.4f\" % acc[-1])\n print(\"Best depth: %d\" % best_depth)", "def grow_trees(self, regrow=False):\n if self.forest == [] or regrow:\n mtry = int(math.floor(math.sqrt(len(self.variables))))\n data, trees, var, pred_index = self.data, self.trees, self.variables, self.prediction_index\n attr_fn, dist_classes, order, imp = self.attr_fn, self.dist_classes, len(self.data), self.importance_fn\n self.forest = random_forest.RandomForest(data, trees, mtry, var, pred_index, attr_fn, dist_classes, order, imp)\n print self.trees, ' have been grown using a set of ', len(self.variables), ' variables.'\n else:\n print \"Already a forest in place, add regrow=True to override.\"", "def build_random_forest(X_train, y_train):", "def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]", "def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n criterion=best_tree_parameters['criterion'],\n random_state=1)", "def BiasedTree(N,alpha=0.): \n free = sample(range(N),N)\n nodes = [free.pop()]\n links = []\n K = np.zeros((N,))\n K[nodes[0]]=1.\n while free:\n newn = free.pop()\n K[newn]=1.\n p = K[np.array(nodes)]**alpha\n p = p/np.sum(p)\n mother = np.random.choice(nodes,p=p)\n K[mother] += 1.\n 
nodes.append(newn)\n links.append((mother,newn))\n return nx.DiGraph(links)", "def fitness(individual, size, seed=0):\n\n np.random.seed(seed)\n values = individual.dataframe.values.flat\n sample = np.random.choice(values, size=size)\n return min(sample)", "def main():\n # If you change this, use an odd number.\n SIZE = 21\n print(makeTree(SIZE))", "def generation(self):\n\n # Re-count and re-collect all root Teams from the main Team\n # population into self.agent_pop\n self.updateAgentPopulation()\n\n # Generate new root Teams as variations of other Teams\n while self.getNumRootTeams() < Trainer.R_SIZE:\n\n # Randomly select parent\n parent = choice(self.agent_pop)\n\n # Copy the parent Team and perform mutation. Note that mutation\n # may result in the creation of new root Teams\n child = Team(parent.team)\n self.mutateTeam(child)\n\n # Add new Team to the Team populations\n self.team_pop.append(child)\n\n # Since mutation can theoretically cause new root Teams to be created,\n # run a check for this just out of curiosity.\n if self.getNumRootTeams() != Trainer.R_SIZE:\n print(\"NOTE - Trainer::generation - self.getNumRootTeams() != Trainer.R_SIZE\")\n print(\" self.getNumRootTeams() =\", self.getNumRootTeams())", "def _local_seeding(self, space: Space, function: Function) -> None:\n\n new_agents = []\n for i, agent in enumerate(space.agents):\n if self.age[i] == 0:\n for _ in range(self.LSC):\n child = copy.deepcopy(agent)\n\n j = r.generate_integer_random_number(high=child.n_variables)\n child.position[j] += r.generate_uniform_random_number(\n child.lb[j], child.ub[j]\n )\n child.clip_by_bound()\n\n child.fit = function(child.position)\n\n new_agents.append(child)\n\n self.age = [age + 1 for age in self.age]\n\n space.agents += new_agents\n\n self.age += [0] * len(new_agents)", "def bad_seed(\n partner1_salary_compound_rate: float,\n partner1_salary_plateau: float,\n partner2_salary_compound_rate: float,\n partner2_salary_plateau: float,\n initial_year: int,\n increase_savings_weight: float,\n initial_tfsa_guess: float,\n final_tfsa_guess: float,\n initial_equalize_income_weighting_guess: float,\n final_equalize_income_weighting_guess: float,\n partner1_year_of_retirement: int,\n partner2_year_of_retirement: int,\n final_year: int,\n rrsp_adjustment_guess: float,\n rrsp_interest_rate: float,\n tfsa_interest_rate: float,\n optimize: solve.Optimizing_Solver,\n):\n\n initial_tfsa_func = optimize.subscribe_optimized_scalar(\n \"initial_tfsa\", 0, 1, initial_tfsa_guess\n )\n final_tfsa_func = optimize.subscribe_optimized_scalar(\n \"final_tfsa\", 0, 1, final_tfsa_guess\n )\n initial_equalize_income_weighting_func = optimize.subscribe_optimized_scalar(\n \"initial_equalize_income_weighting\",\n 0,\n 1,\n initial_equalize_income_weighting_guess,\n )\n final_equalize_income_weighting_func = optimize.subscribe_optimized_scalar(\n \"final_equalize_income_weighting\", 0, 1, final_equalize_income_weighting_guess\n )\n rrsp_adjustment_func = optimize.subscribe_optimized_scalar(\n \"rrsp_adjustment\", -1, 1, rrsp_adjustment_guess\n )\n\n return _bad_seed_raw(\n partner1_salary_compound_rate,\n partner1_salary_plateau,\n partner2_salary_compound_rate,\n partner2_salary_plateau,\n initial_year,\n increase_savings_weight,\n initial_tfsa_func,\n final_tfsa_func,\n initial_equalize_income_weighting_func,\n final_equalize_income_weighting_func,\n partner1_year_of_retirement,\n partner2_year_of_retirement,\n final_year,\n rrsp_adjustment_func,\n rrsp_interest_rate,\n tfsa_interest_rate,\n )", "def 
_reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]", "def evolve(pop_perf, breed_method):\n # Sort on the scores.\n pop = [x[1] for x in sorted(pop_perf, key=lambda x: x[0], reverse=True)]\n\n # keep the best 25%\n retain_length = 2 #int(np.ceil(len(pop)*.25))\n\n # The parents are every network we want to keep.\n parents = pop[:retain_length]\n\n # Randomly mutate the networks we're keeping, and add these\n # This preserves the already good networks, so we don't lose out.\n mutated = []\n for index, individual in enumerate(parents):\n mutated.append(mutate(parents[index]))\n parents.extend(mutated)\n\n # For those we aren't keeping, randomly add 10% of population to increase variance. Mutate them individually, then add. \n # Mutation because we already know they are bad, should try something else. Something like that.\n num_poor = 2#int(math.ceil(len(pop)*.1))\n poor_keeping = random.sample(pop[retain_length:], num_poor)\n for poor_sch in poor_keeping:\n parents.append(mutate(poor_sch))\n\n # Now find out how many spots we have left to fill. (how many children to make, about 40% of full pop)\n parents_length = len(parents)\n desired_length = len(pop) - parents_length\n children = []\n\n\n # Add children, which are bred from two remaining networks.\n while len(children) < desired_length:\n\n # Get a random mom and dad.\n male = random.randint(0, parents_length-1)\n female = random.randint(0, parents_length-1)\n\n # Assuming they aren't the same network...\n if male != female:\n male = parents[male]\n female = parents[female]\n\n # pick breeding method:\n if random.random() > .5:\n way = 'mean'\n else:\n way = 'random'\n \n # Breed them.\n babies = breed_method(male, female, way)\n\n # children.append(babies[desired_length:])\n # Add the children one at a time.\n for baby in babies:\n # Don't grow larger than desired length.\n if len(children) < desired_length:\n children.append(baby)\n parents.extend(children)\n return parents", "def make_tree(self):\n\n # list [station_name]\n visited = []\n\n # creates empty station object for each station and adds coordinates\n for station in self.stations:\n new_station = Station(station)\n coordinates = self.stations[station].get_coordinates()\n new_station.add_coordinates(coordinates[0], coordinates[1])\n\n # saves station in prims_tree dictionary\n self.prims_tree[station] = new_station\n\n # choose random beginning station\n random_station = random.choice(list(self.stations.values()))\n\n # sort station connections and retrieve shortest\n station_connections = random_station.get_connections()\n station_connections = sorted(station_connections.items(), key=operator.itemgetter(1))\n new_connection = station_connections.pop(0)\n new_station = new_connection[0]\n new_time = new_connection[1]\n\n # retrieve empty stations from prims_tree dictionary\n first_station = self.prims_tree[random_station.name]\n new_station = self.prims_tree[new_station.name]\n\n # add shortest connection to stations\n first_station.add_connection(new_station, new_time)\n new_station.add_connection(first_station, new_time)\n\n # add stations to visited\n visited.append(first_station.name)\n visited.append(new_station.name)\n\n # runs until all stations are visited\n while len(visited) is not len(self.prims_tree):\n # starts as arbitrarily high number\n min_connection_time = 9999\n\n # get connections of visited 
stations\n for station in visited:\n connections = self.stations[station].get_connections()\n\n # get time of connections\n for connection in connections:\n connection_time = connections[connection]\n\n # save smallest connection if time is smallest and station is not visited\n if connection.name not in visited and connection_time < min_connection_time:\n smallest_connection = self.prims_tree[connection.name]\n smallest_connection_station = self.prims_tree[station]\n min_connection_time = connection_time\n else:\n continue\n\n # add smallest connection to station in prims_tree dictionary\n smallest_connection_station.add_connection(smallest_connection, min_connection_time)\n smallest_connection.add_connection(smallest_connection_station, min_connection_time)\n\n # add new connection to visited list\n visited.append(smallest_connection.name)\n\n return self.prims_tree", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n 
plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, \n max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,\n verbose=0, min_density=None, compute_importances=None): \n self.random_forest = RandomForestClassifier(n_trees, criterion, max_depth, min_samples_split, min_samples_leaf, \n max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,\n verbose, min_density, compute_importances)", "def main():\n\n config = read_json_file(CONFIG_FILE)\n posititve_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"POSITIVE_FILENAME\"]\n )\n negative_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"NEGATIVE_FILENAME\"]\n )\n complexity_factor = config[\"main\"][\"COMPLEXITY_FACTOR\"]\n max_sequences_to_fit_pos = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_POS\"]\n max_sequences_to_fit_neg = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_NEG\"]\n\n input_organisms_path = config[\"main\"][\"INPUT_FILENAME\"]\n mean_nodes = 3.0\n mean_fitness = 150\n positive_dataset = read_fasta_file(posititve_path)\n positive_dataset.sort()\n negative_dataset = read_fasta_file(negative_path)\n print(\"{} {}\".format(len(positive_dataset), len(negative_dataset)))\n\n organism_factory = OrganismFactory(\n config[\"organism\"],\n config[\"organismFactory\"],\n config[\"connector\"],\n config[\"pssm\"],\n )\n\n a_organisms = organism_factory.import_organisms(input_organisms_path)\n # random.shuffle(negativeDataset)\n\n for org in a_organisms:\n\n # org.print()\n nodes = org.count_nodes()\n\n p_1 = org.get_seq_set_fitness(\n positive_dataset[:max_sequences_to_fit_pos]\n )\n n_1 = org.get_seq_set_fitness(\n negative_dataset[:max_sequences_to_fit_neg]\n )\n # p1 = 20\n # n1 = org.getSeqSetFitness(negativeDataset[31:32])\n c_1 = org.get_complexity(mean_nodes, mean_fitness)\n\n # Score\n fitness = p_1 - n_1\n effective_fitness = fitness - complexity_factor * c_1\n print(\n (\n \"ORG {} N: {:.2f} P: {:.2f} N: {:.2f} C: {:.2f} F: {:.2f}\"\n + \" EF: {:.2f}\\n\"\n ).format(org._id, nodes, p_1, n_1, c_1, fitness, effective_fitness)\n )\n\n export_organism(\n org,\n positive_dataset,\n \"{}positive_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )\n # exportOrganism(\n # org,\n # negativeDataset[31:32],\n # \"{}negative_{}\".format(config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org.ID),\n # organismFactory,\n # )\n\n export_organism(\n org,\n negative_dataset[:50],\n \"{}negative_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )", "def initLocalBestChoice(self):\n random.seed()\n return", "def _generate_raw_environments(self, num, seed):", "def uct(root_state, iter_max, verbose=False):\n\n root_node = Node(state=root_state)\n\n for i in range(iter_max):\n node = root_node\n state = root_state.clone()\n\n # 
Select\n # node is fully expanded and non-terminal\n while not node.untried_moves and node.child_nodes:\n node = node.uct_select_child()\n state.do_move(node.move)\n\n # Expand\n # if we can expand (i.e. state/node is non-terminal)\n if node.untried_moves:\n m = random.choice(node.untried_moves)\n state.do_move(m)\n # add child and descend tree\n node = node.add_child(m, state)\n\n # Rollout - this can often be made orders of magnitude quicker\n # using a state.get_random_move() function\n # while state is non-terminal\n while state.get_moves():\n state.do_move(random.choice(state.get_moves()))\n\n # Backpropagate\n # backpropagate from the expanded node and work back to the root node\n while node is not None:\n # state is terminal. Update node with result\n # from POV of node.playerJustMoved\n node.update(state.get_result(node.player_just_moved))\n node = node.parent_node\n\n # Output some information about the tree - can be omitted\n if verbose:\n print(root_node.tree_to_string(0))\n else:\n print(root_node.children_to_string())\n\n # return the move that was most visited\n return sorted(root_node.child_nodes,\n key=lambda c: c.Q)[-1].move", "def create_program(fe: FitnessEvaluator, max_len: int) -> str:\n\n # mut_prob = {\"<\": 0.8, \">\": 0.8, \"+\": 0.6, \"-\": 0.6, \"[\": 0.1, \"]\": 0.1}\n\n # new_population: List[Program] = []\n\n # k = 1000\n # N = 0.5 # N is top percentile for selection process\n\n converges = True\n gen_no = 0\n\n while 1:\n k = 1000 # k represents the initial population size\n gen_no = gen_no + 1\n print(gen_no)\n if gen_no == 100:\n converges = True\n gen_no = 0\n\n # generate initial random, score initial random, add to population\n if converges:\n converges = False\n population: List[Program] = []\n res = generate_random(fe, max_len, k, population)\n if res != \"\":\n # print(\"from RANDOM\")\n return res\n\n new_population: List[Program] = []\n ct = [0]\n\n while ct[0] != k:\n weights = populate_weights(k, population)\n\n population.sort(key=lambda program: program.score)\n\n selected = random.choices(population, weights=weights, k=k//2)\n selected.sort(key=lambda program: program.score)\n\n if bad_average(selected):\n k = 0\n converges = True\n gen_no = False\n break\n\n res = select(new_population, selected, fe, k//2, ct)\n if res != \"\":\n return res\n\n for i in range(k):\n population[i] = new_population[i]", "def _iter_build_most_significant_tree(ktree, stree, node):\n sch = find_significant_children(ktree, node)\n if sch is not None:\n small, big = sch\n stree.parents[small] = node\n stree.parents[big] = node\n stree.children[node] = [small, big]\n stree.population[node] = ktree.population[node]\n stree.descriptor[node] = ktree.descriptor[node]\n stree.weights[node] = ktree.weights[node]\n stree.slides[node] = ktree.slides[node]\n _iter_build_most_significant_tree(ktree, stree, small)\n _iter_build_most_significant_tree(ktree, stree, big)", "def forest_model(params):\n if (params['random']):\n params['n_estimators'] = random.choice([1, 3, 5, 10, 20, 30, 40, 50, 75, 100])\n model = ExtraTreesClassifier(\n n_estimators=params['n_estimators'],\n random_state=0\n )\n\n return model", "def initPopulation(self):\n for i in range(0, self.popSize):\n individual = Individual(self.genSize, self.data)\n individual.computeFitness()\n self.population.append(individual)\n\n self.best = self.population[0].copy()\n for ind_i in self.population:\n if self.best.getFitness() > ind_i.getFitness():\n self.best = ind_i.copy()\n print (\"Best initial sol: 
\",self.best.getFitness())", "def nsga_replacement(random, population, parents, offspring, args):\r\n survivors = []\r\n combined = list(population)\r\n combined.extend(offspring)\r\n \r\n # Perform the non-dominated sorting to determine the fronts.\r\n fronts = []\r\n pop = set(range(len(combined)))\r\n while len(pop) > 0:\r\n front = []\r\n for p in pop:\r\n dominated = False\r\n for q in pop:\r\n if combined[p] < combined[q]:\r\n dominated = True\r\n break\r\n if not dominated:\r\n front.append(p)\r\n fronts.append([dict(individual=combined[f], index=f) for f in front])\r\n pop = pop - set(front)\r\n \r\n # Go through each front and add all the elements until doing so\r\n # would put you above the population limit. At that point, fall\r\n # back to the crowding distance to determine who to put into the\r\n # next population. Individuals with higher crowding distances\r\n # (i.e., more distance between neighbors) are preferred.\r\n for i, front in enumerate(fronts):\r\n if len(survivors) + len(front) > len(population):\r\n # Determine the crowding distance.\r\n distance = [0 for _ in range(len(combined))]\r\n individuals = list(front)\r\n num_individuals = len(individuals)\r\n num_objectives = len(individuals[0]['individual'].fitness)\r\n for obj in range(num_objectives):\r\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\r\n distance[individuals[0]['index']] = float('inf')\r\n distance[individuals[-1]['index']] = float('inf')\r\n for i in range(1, num_individuals-1):\r\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \r\n (individuals[i+1]['individual'].fitness[obj] - \r\n individuals[i-1]['individual'].fitness[obj]))\r\n \r\n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\r\n crowd.sort(key=lambda x: x['dist'], reverse=True)\r\n last_rank = [combined[c['index']] for c in crowd]\r\n r = 0\r\n num_added = 0\r\n num_left_to_add = len(population) - len(survivors)\r\n while r < len(last_rank) and num_added < num_left_to_add:\r\n if last_rank[r] not in survivors:\r\n survivors.append(last_rank[r])\r\n num_added += 1\r\n r += 1\r\n # If we've filled out our survivor list, then stop.\r\n # Otherwise, process the next front in the list.\r\n if len(survivors) == len(population):\r\n break\r\n else:\r\n for f in front:\r\n if f['individual'] not in survivors:\r\n survivors.append(f['individual'])\r\n return survivors", "def er_random_graph_generator(n, p, ng, seed, w_base, w_top):\n\n f_er_graph_list = []\n for i in range(0, ng):\n f_g = nx.erdos_renyi_graph(n, p, seed + i, directed=False)\n for (u, v, w) in f_g.edges(data=True):\n w['weight'] = random.randint(w_base, w_top)\n f_er_graph_list.append(f_g)\n return f_er_graph_list", "def evolve_population(population):\r\n \r\n \r\n pop = sort_population(population)\r\n \r\n # test de la meilleure solution ?\r\n\r\n #On choisit les parents\r\n #parents = pop[:NB_POP_TO_KEEP]\r\n for individual in pop[:NB_POP_TO_KEEP]:\r\n parents.append(i[0])\r\n \r\n #On garde des mauvais\r\n for individual in pop[NB_POP_TO_KEEP:]:\r\n if random.random() < PART_OF_BAD_TO_KEEP :\r\n parents.append(i[0])\r\n \r\n \r\n #On réalise des mutations\r\n for individual in parents :\r\n if random.random() < CHANCE_TO_MUTATE :\r\n indice = int( random.random() * PARAMETERS_COUNT )\r\n individual[indice] = random.random()\r\n \r\n #Create new pop\r\n size_parents = len(parents)\r\n size_to_create = POPULATION_COUNT - size_parents\r\n children = []\r\n while len(children) < size_to_create:\r\n parent1 = 
choose(parents)\r\n parent2 = choose(parents)\r\n child = parent1[:(PARAMETERS_COUNT/2)] + parent2[(PARAMETERS_COUNT/2):]\r\n children.append(child)\r\n \r\n return parents", "def PickSeeds(self, leaf1, leaf2):\n d = 0\n t1 = 0\n t2 = 0\n\n # iter all possible children nodes, to find the max difference.\n for i in range(len(self.leaves)):\n for j in range(i + 1, len(self.leaves)):\n MBR_new = merge(self.leaves[i].MBR, self.leaves[j].MBR)\n S_new = 1.0 * (MBR_new['xmax'] - MBR_new['xmin']) * (MBR_new['ymax'] - MBR_new['ymin'])\n S1 = 1.0 * (self.leaves[i].MBR['xmax'] - self.leaves[i].MBR['xmin']) * (\n self.leaves[i].MBR['ymax'] - self.leaves[i].MBR['ymin'])\n S2 = 1.0 * (self.leaves[j].MBR['xmax'] - self.leaves[j].MBR['xmin']) * (\n self.leaves[j].MBR['ymax'] - self.leaves[j].MBR['ymin'])\n if S_new - S1 - S2 > d:\n t1 = i\n t2 = j\n d = S_new - S1 - S2\n n2 = self.leaves.pop(t2)\n n2.father = leaf1\n leaf1.leaves.append(n2)\n leaf1.MBR = leaf1.leaves[0].MBR\n n1 = self.leaves.pop(t1)\n n1.father = leaf2\n leaf2.leaves.append(n1)\n leaf2.MBR = leaf2.leaves[0].MBR", "def selection(self):\n\n for i in range(self.pop_num*3): # It is important. Next, we will rank the array of parents and children in ascending order of survivability (sum (fit)).\n self.par_and_sons[i].fit = SimpleSegmentationGA.fitness_function(self.gray, self.delta_x, self.length, self.par_and_sons[i].A)\n\n # Sort.\n self.par_and_sons = sorted(self.par_and_sons, key=lambda individ: individ.fit) \n self.population=self.par_and_sons[:self.pop_num].copy()", "def guessTreeOpt(train, test, valid):\n best = findApproxDepth(train, valid, 5, 5)\n tree = DecisionTree(train)\n print(\"building tree from full set\")\n tree.buildTree(best[0], best[1], True)\n print(\"tree built, testing tree\")\n acc = testTreeF(tree, test)\n print(\"accuracy of:\", \"%.2f\" % (acc * 100))\n return tree", "def __init__(self, memory_size, batch_size, alpha, mu, seed):\n self.tree = SumTree(memory_size)\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.alpha = alpha\n self.__e = 0.01\n self.__mu = mu\n np.random.seed(seed)", "def bst_100_rand():\n from bbst import Bst\n from random import shuffle\n rando = [num for num in range(100)]\n shuffle(rando)\n tree = Bst(rando)\n return tree", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def test_small_tree_treewidth(self):\n G = self.small_tree\n # the order of removal should be [1,2,4]3[5,6,7]\n # (with [] denoting any order of the containing nodes)\n # resulting in treewidth 2 for the heuristic\n treewidth, _ = treewidth_min_fill_in(G)\n assert_equals(treewidth, 2)", "def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed" ]
[ "0.6857431", "0.6693448", "0.620608", "0.61089987", "0.60859585", "0.60058355", "0.59534144", "0.5926585", "0.5920129", "0.59118456", "0.58440024", "0.5819384", "0.5805844", "0.5803149", "0.5793257", "0.5775363", "0.5769751", "0.5761044", "0.5759271", "0.5751068", "0.5741327", "0.5735114", "0.57342404", "0.573164", "0.5725142", "0.5724565", "0.57161117", "0.571406", "0.57060975", "0.56925696", "0.5675556", "0.56714666", "0.5663999", "0.56564444", "0.5653783", "0.565099", "0.56348187", "0.5631936", "0.56313884", "0.5628846", "0.5626083", "0.5620615", "0.56168866", "0.56119573", "0.55688703", "0.5565804", "0.5556349", "0.5548278", "0.5547268", "0.55441946", "0.5543183", "0.5542615", "0.5534804", "0.5532145", "0.55259746", "0.5510027", "0.54984266", "0.5497924", "0.54964775", "0.5494477", "0.54899096", "0.5485209", "0.5482233", "0.54745203", "0.5468924", "0.5457753", "0.54532427", "0.54413795", "0.5439939", "0.54385084", "0.5437562", "0.54285014", "0.5423152", "0.5409433", "0.5406653", "0.5398994", "0.53906864", "0.5386467", "0.538419", "0.5383976", "0.5382249", "0.5375195", "0.5372212", "0.5370635", "0.5367775", "0.536383", "0.5363198", "0.5360546", "0.5351857", "0.53504884", "0.534401", "0.53413564", "0.534057", "0.53382885", "0.53289115", "0.53285384", "0.53260237", "0.53236437", "0.53236437", "0.5318862" ]
0.784303
0
Computes the number of seeds given a fitness value.
def compute_seeds(fitness):
    seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \
            (self.max_seeds-self.min_seeds) + self.min_seeds

    return round(seeds)
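A self-contained sketch of the same rescaling, assuming the bounds are passed in explicitly rather than taken from the enclosing scope (the function name and the example numbers below are illustrative assumptions, not part of the source): the fitness is mapped linearly from [min_fitness, max_fitness] onto the seed budget [min_seeds, max_seeds].

def scale_fitness_to_seeds(fitness, min_fitness, max_fitness, min_seeds, max_seeds):
    # Linear rescaling: the fittest tree receives max_seeds, the least fit receives min_seeds.
    fraction = (fitness - min_fitness) / (max_fitness - min_fitness)
    return round(fraction * (max_seeds - min_seeds) + min_seeds)

# Illustrative values only: fitness 0.75 in a population spanning [0.2, 1.0],
# with a per-tree seed budget between 1 and 6.
assert scale_fitness_to_seeds(0.75, 0.2, 1.0, 1, 6) == 4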
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_fitness(self, **kwargs):\n self.__fitness = self.fitness_function.calculate(self.__genes, **kwargs)\n self.num_fitness_eval += 1\n return self.__fitness", "def fitness(self):\n # TO BE DECIDED\n return 1", "def calcFitness (self) :\n fitnessArray = [[8, 4, 2, 1],\n [16, 8, 4, 2],\n [32, 16, 8, 4],\n [64, 32, 16, 8]]\n # fitnessArray = [[160, 80, 5, 4],\n # [320, 40, 4, 3],\n # [640, 20, 3, 2],\n # [1280, 10, 2, 1]]\n fitness = 0\n for k in range(4) :\n for i in range (4) :\n fitness += self.grid[k,i] * fitnessArray[k][i]\n return (fitness / 100)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def fitness_function(self, population: List[Network]) -> List[Union[float, int]]:\n # The seed changes\n self.last_used_seed += 1\n\n # Snakes are re-generated\n snakes = []\n for n in population:\n snakes.append(Snake(11, Experiment.ExperimentAI(n)))\n\n # Metrics are calculated\n scores, times = self.snake_game.simulate(snakes, self.last_used_seed)\n\n # The fitnesses are calculated\n fitnesses = []\n for i in range(len(scores)):\n f = scores[i]*(1.0 + 1.0/float(times[i]))\n fitnesses.append(f)\n\n return fitnesses", "def evaluate(self, state):\n\n fitness = np.sum(state)\n self.num_evals += 1\n #print(self.num_evals)\n return fitness", "def fitness(dna):\n fitness = 0\n for c in range(DNA_SIZE):\n if dna[c] == OPTIMAL[c]:\n fitness += 1\n return fitness", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def calculate_fitness_value(self):\n sequence = ''.join(self.genes)\n if sequence in seq_to_fitness:\n self.fitness_value = seq_to_fitness[sequence]\n else:\n self.fitness_value = polly_stats.get_amount_of_bad_regions(\n self.genes, self.environment)", "def fitness(ind):\n return kNN.distance(ind),", "def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )", "def evaluate(self, state):\n\n fitness = 0\n\n for i in range(1, len(state)):\n if state[i] != state[i - 1]:\n fitness += 1\n\n return fitness", "def _calculate_fitness(self):\n pass", "def calculate_fitness_test(self, **kwargs):\n if self.genes_test is None:\n raise ValueError(\"Genes test is not set!\")\n\n self.__fitness_test = self.fitness_function.calculate(self.__genes_test, **kwargs)\n self.num_fitness_eval += 1", "def fitness(self):\n pass", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def average_fitness(individuals):\n fitness_num = 0\n for individual in individuals:\n fitness = individual.get_fitness()\n fitness_num += fitness\n return fitness_num / len(individuals)", "def calc_sum_fitness(self):\n fitness: float = 0\n for c in self.characters:\n fitness += c.fitness\n self.sum_fitness = round(fitness, 3)", "def fitness(self) -> float:\n return self._fitness", "def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < 
npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run", "def fitness_function(neural_net):\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness", "def compute_tot_fitness(fitness_function, pop):\n probs = np.zeros(len(pop)) # list to house probabilites\n best_member = ''\n best_fitness = -10**18\n total_fitness = 0 # The sum of of all the fitness values from the population.\n for i, chromosome in enumerate(pop):\n new_fitness = fitness_function(chromosome)\n if new_fitness > best_fitness:\n best_member = chromosome\n best_fitness = new_fitness\n total_fitness += new_fitness\n probs[i] = new_fitness\n probs = probs / total_fitness\n return total_fitness, best_fitness, best_member, probs", "def fitness(individual):\n different_pos = 0\n return different_pos", "def fitness(self, solution):\n cur_fit = 0\n for i in range(self.N):\n cur_fit += self.dist(solution[i % self.N], solution[(i + 1) % self.N])\n return cur_fit", "def calculate_cluster_fitness(self, cluster_id: ObjectId):\n\n genomes = self.genome_repository.get_genomes_in_cluster(cluster_id)\n\n cluster_fitness = 0\n\n for genome in genomes:\n cluster_fitness += genome.fitness\n if cluster_fitness == 0:\n return 0\n\n return cluster_fitness / len(list(genomes))", "def _fitness(individual, X, y):\n yhat = individual.evaluate(X)\n return ((y - yhat) ** 2).sum()", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = max(differences)\n return best", "def fitness(self):\n return (len(self.body)**2) * self.age", "def get_overall_fitness(self):\n total_fitness = 0\n for chromosome_list in self.chromo_list:\n if chromosome_list:\n for chromosomes in chromosome_list:\n total_fitness += chromosomes.fitness\n\n return float(total_fitness/(self.number_chromosomes*\\\n float(self.best_fitness)))", "def calc_fitness_by_gen(self):\r\n f_sum = 0\r\n # first loop gives us the sum of the fitness\r\n for c, _ in self.temp_hist_by_gen.items():\r\n f_sum += c.fitness()\r\n # now we calc the chances by fitness of each one\r\n for c, _ in self.temp_hist_by_gen.items():\r\n self.temp_hist_by_gen[c] = c.fitness() / f_sum", "def calculate_fitness(self):\n fitness = (self.matrix * self.weight_matrix).sum()\n self.fitness = fitness\n return fitness", "def run_tournament_(genes):\n\n n_genes = len(genes)\n scores = np.zeros(n_genes, dtype=np.uint32)\n for i, j in itertools.combinations(range(n_genes), 2):\n s_i, s_j = run_duel(genes[i], genes[j])\n scores[i] += s_i\n scores[j] += s_j\n continue\n\n return scores / (n_genes - 1)", "def get_fitness(self):\n if self.fitness == 0:\n self.fitness = 1 / self.get_cost()\n return self.fitness", "def fitness(individual, size, seed=0):\n\n np.random.seed(seed)\n values = individual.dataframe.values.flat\n sample = np.random.choice(values, size=size)\n return min(sample)", "def get_fitness(self, use_opacity=True):\n chromosome_img = self.generate_chromosome_image(use_opacity=use_opacity)\n fitness = np.sum((chromosome_img.astype(\"float\") - self._img.astype(\"float\")) ** 2)\n fitness /= float(chromosome_img.shape[0] * chromosome_img.shape[1])\n 
return fitness", "def calculate_fitness_value(sequence, seq_to_fitness, key, program):\n if key not in seq_to_fitness:\n seq_to_fitness[key] = polly_stats.get_amount_of_bad_regions(sequence,\n program)", "def get_individual_fitness(individual):\r\n fitness = 0\r\n # A COMPLETER\r\n \r\n #Si distance avec le point objectif diminue, alors fitness augmente ?\r\n \r\n return fitness", "def run_tournament(genes):\n return _run_tournament(genes) / (len(genes) - 1)", "def get_generation_number(self, size=None):\n if size is None:\n size = self.get_param('population_size')\n if size is None:\n # size = len(list(self.c.select(relaxed=0,generation=0)))\n return 0\n lg = size\n g = 0\n all_candidates = list(self.c.select(relaxed=1))\n while lg > 0:\n lg = len([c for c in all_candidates if c.generation == g])\n if lg >= size:\n g += 1\n else:\n return g", "def fitness(self,*val):\n if len(val): self._fitness = val[0]\n return self._fitness", "def get_fitness(self) -> float:\n return self.fitness", "def fitnessValue(square_data=[]):\n #TODO: Evaluar la red neuronal para obtener el valor fitness. \n # n = math.sqrt(len(square_data))\n # X = square_data\n # Y = [(x, len(list(y))) for x, y in itertools.groupby(X)]\n # # Repetidos:\n # repeated = 0\n # for a in Y:\n # if a[1] > 1:\n # repeated += 1\n # differences = differencesSum(square_data=square_data, n=n)\n # result = (1 + repeated) * differences + (repeated ** 2)\n hyper_p = NeuralNetwork.getHyperParemeters(setup=square_data)\n print('setup: ', square_data)\n print('hyper_p:',hyper_p)\n training, test, model = NeuralNetwork.useNetwork(TRAIN,TEST,LAYERS,alpha=hyper_p[0], iterations=hyper_p[2], lambd=hyper_p[1], keep_prob=hyper_p[3])\n return test", "def random_test(self, source):\r\n ret = 1\r\n for seed in range(1, 40):\r\n if source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x)**2+10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}) != \\\r\n source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x) ** 2 + 10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}):\r\n ret = 0\r\n if ret == 0:\r\n if self.verbosity > 0:\r\n print(\"ERROR: Random seed non functional, results cannot be replicated.\")\r\n return 0\r\n else:\r\n if self.verbosity > 1:\r\n print(\"Random seed functional, results replicable if a seed is used.\")\r\n return 1", "def evaluate_fitness_against_random(self):\n #self.normalize() # Normalize before evaluating\n for i in tqdm(range(self.population_size)):\n self.individual.load_chromosome(self.population[i])\n self.fitness[i] = evaluate_agent(self.individual, self.evaluations_per_chromosome * 4) / (self.evaluations_per_chromosome * 4)\n print(self.fitness)", "def number_of_iterations(self) -> int:\n pass", "def evaluate(genome):\n # base fitness\n fit = 1.0\n # promote 1001 starting motif\n matches = 0\n if genome.sequence_A[0] == 1:\n matches += 1\n if genome.sequence_A[1] == 0:\n matches += 1\n if genome.sequence_A[2] == 0:\n matches += 1\n if genome.sequence_A[3] == 1:\n matches += 1\n fit += matches * 0.1\n # finish\n return fit", "def average_fitness(self):\n return sum([e.fitness for e in self.population]) / len(self.population)", "def evaluate_fitness(self, pos):\n _, index = self.tree.query(pos)\n return 1 - (self.fitness_function[index // self.resolution][index % self.resolution] - self.min) / (self.max - self.min)", "def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum 
= numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)", "def c_test_fitness_function(self, function):\r\n return 1", "def c_test_fitness_function(self, function):\r\n return 1", "def count_sevens( rolls ):\n count = 0\n total = 0\n while count < rolls:\n if random.randint(1, 6) + random.randint(1, 6) == 7:\n count += 1\n total += 1\n if total >= 12 * rolls:\n return total\n\n return total", "def calc_fitness(variant_fit, orig_fit, generations, count):\n\n Ne = 1000\n #Determines the value that controls variation within the simulation\n if count <= 0.1 * generations:\n beta = 1e-2\n elif count <= 0.9 * generations:\n #y = mx + b: linearly increases\n slope = (1.1 - 1e-2) / (0.8 * generations)\n beta = (slope * (count - (0.1 * generations))) + 1e-2\n else:\n slope = (1.3 - 1.1) / (0.1 * generations)\n beta = (slope * (count - (0.9 * generations))) + 1.1\n thresholds = 0\n\n #Fitness values are calculated based on the new and current sum of squared values\n xi = calc_x(orig_fit, beta, thresholds)\n xj = calc_x(variant_fit, beta, thresholds)\n\n #Fitness values are compared to determine if a mutation should be accepted\n if xj >= xi:\n return 1.0\n #Deleterious mutations are accepted exponentially\n else:\n exponent = -2 * float(Ne) * (xi - xj)\n return safe_calc(exponent)", "def generate_fitness_scores(self):\n desired_destination = (700,300) # map 1\n self.fitness_scores = list()\n for resulting_destination in self.last_generation[\"deaths\"]:\n print(\"resulting_destination: \", resulting_destination)\n dist = data_helpers.distance(resulting_destination, desired_destination)\n print(\"distance: \", dist)\n fitness = 1000-abs(int(dist))\n print(\"fitness: \", fitness)\n self.fitness_scores.append(fitness)", "def fitness(self, x):\n u = np.asarray([x[0]])\n C = 
self.C_func(u)\n P = self.P\n return np.asarray([np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))])", "def fitness(self):\n # for gene in self.genes:\n starting_gene = min(self.genes, key=lambda x: x.start_time)\n # print(starting_gene.start_time) \n finishing_gene = max(self.genes, key=lambda x: x.start_time + x.finish)\n # print(finishing_gene.start_time)\n self.current_time_value:int = finishing_gene.finish + finishing_gene.start_time - starting_gene.start_time\n return self.current_time_value", "def number_of_iterations(self) -> int:\n return self._stats[\"iter_count\"]", "def eval_fitness(genomes, config):\n for _, genome in genomes:\n cppn = neat.nn.FeedForwardNetwork.create(genome, config)\n network = ESNetwork(SUBSTRATE, cppn, DYNAMIC_PARAMS)\n net = network.create_phenotype_network()\n\n sum_square_error = 0.0\n\n for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS):\n new_xor_input = xor_inputs + (1.0,)\n net.reset()\n\n for _ in range(network.activations):\n xor_output = net.activate(new_xor_input)\n\n sum_square_error += ((xor_output[0] - xor_expected[0])**2.0)/4.0\n\n genome.fitness = 1 - sum_square_error", "def calc_average_fitness(self):\n fitness: float = 0\n for c in self.characters:\n fitness += c.fitness\n self.average_fitness = round((fitness / self.size), 3)", "def calculate_population_fitness(self):\n for individual in tqdm(self.current_generation):\n individual.fitness = self.fitness_function(\n individual.genes, self.seed_data)\n log.info(f'Current best validation accuracy: {max([x.fitness for x in self.current_generation])}')", "def calculate_fitness(pTarget, pCandidate):\n\terr = 0\n\tfor i in range(len(pTarget)):\n\t\terr += math.sqrt((pTarget[i] - pCandidate[i]) * (pTarget[i] - pCandidate[i])) \n\tret = 1000 - err\n\tif ret < 0:\n\t\tret = 0\n\treturn ret\n\t#return 1./err", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def number_of_iterations(self):\n return self._solution.nit", "def number_of_iterations(self):\n return self._solution[\"iterations\"]", "def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += self.weights[c.element[0]][c.element[1]]\r\n last = (candidate[-1].element[1], candidate[0].element[0])\r\n total += self.weights[last[0]][last[1]]\r\n fitness.append(1 / total)\r\n else:\r\n for candidate in candidates:\r\n total = 0\r\n for src, dst in zip(candidate, candidate[1:] + [candidate[0]]):\r\n total += self.weights[src][dst]\r\n fitness.append(1 / total)\r\n return fitness", "def _get_fitness_increes(self, fit_list, func):\n inc = 0\n for index in range(1, len(fit_list)-1):\n inc += fit_list[index] - fit_list[index-1]\n\n return inc", "def _get_total_fitness(self, populations, function):\n total_fit = []\n for population in populations:\n total_fit.append(sum([function.fit(*arg) for arg in population]))\n\n return total_fit", "def fitness(NN):\n x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n expected_y = np.array([[0], [1], [1], [0]])\n y = NN.feed_forward(x)\n error = expected_y - y\n return 1 / (np.square(np.dot(error.T, error)).squeeze() + 0.01)", "def calculate_fitness(self,chromo,index):\n #Incase chromosome has been scheduled before\n chromo.fitness = 0\n \n hold_index = index\n \n #Figure out which room you are in\n data_tuple = self.get_room_day_numbers(hold_index)\n room_id = data_tuple[1]\n print room_id\n #Get Room object\n room = self.config.get_room_by_id(room_id)\n\n course = 
chromo._class\n #Course might not overlap at current position, but could if duration is\n #longer than 1, this checks for that\n if not chromo.overlap:\n if course.duration > 1:\n count = 0\n while count < (course.duration):\n index += 1\n if index < len(self.chromo_list):\n check_list = self.chromo_list[index]\n if check_list:\n chromo.overlap = True\n count += 1\n\n #Class does not overlap EVER\n if not chromo.overlap:\n chromo.fitness += 1\n\n #Room is able to fit the class\n if course.get_room_size() <= room.get_seat_num():\n chromo.fitness += 1\n\n #Course needs lab and room has lab\n if course.needs_lab():\n if room.lab_status():\n chromo.fitness += 1\n\n #Course doesnt need lab and room doesnt have lab\n if not course.needs_lab():\n if not room.lab_status():\n chromo.fitness += 1\n\n #Only way a Professor will have an overlapping class is if the\n #class overlaps with another class\n if chromo.overlap:\n prof_overlap = False\n index = hold_index\n count = 0\n while count < (course.duration):\n index += count\n check_list = self.chromo_list[index]\n for courses in check_list:\n prof_overlap = course.professor_overlap(courses._class)\n if prof_overlap:\n break\n if prof_overlap:\n break\n count += 1\n\n if not prof_overlap:\n chromo.fitness += 1\n else:\n chromo.fitness += 1\n\n if chromo.fitness is self.best_fitness:\n self.best_of[chromo._class] = self.best_fitness", "def fitness(ch,distance,shift):\n countryNo=len(ch)\n total = 0.\n for c in range(countryNo):\n total += distance[ch[c]][ch[(c+1)%countryNo]]\n if shift - total < 0:\n return 0\n else:\n return shift - total", "def calculate_fitness(dna: str) -> float:\r\n # make minimum fitness 1 to avoid possible division by zero later\r\n fitness = 1\r\n for c in range(DNA_SIZE):\r\n fitness += abs(ord(dna[c]) - ord(OPTIMAL[c]))\r\n return 1 / fitness", "def rank_selection(self, fitness, num_parents):\n\n # This has the index of each solution in the population.\n fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])\n\n # Rank the solutions based on their fitness. The worst is gives the rank 1. 
The best has the rank N.\n rank = numpy.arange(1, self.sol_per_pop+1)\n\n probs = rank / numpy.sum(rank)\n\n probs_start, probs_end, parents = self.wheel_cumulative_probs(probs=probs.copy(), \n num_parents=num_parents)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_prob = numpy.random.rand()\n for idx in range(probs.shape[0]):\n if (rand_prob >= probs_start[idx] and rand_prob < probs_end[idx]):\n # The variable idx has the rank of solution but not its index in the population.\n # Return the correct index of the solution.\n mapped_idx = fitness_sorted[idx]\n parents[parent_num, :] = self.population[mapped_idx, :].copy()\n parents_indices.append(mapped_idx)\n break\n\n return parents, numpy.array(parents_indices)", "def number_of_sites(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"number_of_sites\")", "def number_of_sites(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"number_of_sites\")", "def compute_fitness(self, list_cities: list) -> float:\n \n # Inverse fittness because we want total distance as small as posible\n self.fitness = math.e / self.compute_tour_cost(list_cities)\n return self.fitness", "def rng() -> int:", "def find_n_qubits(gates):\n return max((get_maximum_index(g.targets) for g in gates), default=-1) + 1", "def fitness(self, valuation):\n return sum(map(lambda i: i['w'] * self.is_clause_satisfied(valuation, i['clause']), self.clauses))", "def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += c.value\r\n fitness.append(total)\r\n else:\r\n for candidate in candidates:\r\n total_value = 0\r\n total_weight = 0\r\n for c, i in zip(candidate, self.items):\r\n total_weight += c * i[0]\r\n total_value += c * i[1]\r\n if total_weight > self.capacity:\r\n fitness.append(self.capacity - total_weight)\r\n else:\r\n fitness.append(total_value)\r\n return fitness", "def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def valency(self):\n return len(self.neighbors())", "def calculate_fitness(self, obs):\n feature_units = obs.observation.feature_units\n self.fitness = self.initial_enemy_hit_points + self.calculate_hitpoints(\n feature_units, _PLAYER_SELF) - self.calculate_hitpoints(feature_units, _PLAYER_ENEMY)\n return self.fitness / 950", "def test_fitness():\n herb1 = Herbivore(0)\n herb2 = Herbivore(80)\n nt.assert_not_equal(herb1.fitness, herb2.fitness)\n herb3 = Herbivore(20, 0)\n herb4 = Herbivore(20, 80)\n nt.assert_not_equal(herb3.fitness, herb4.fitness)", "def evaulate_fitness_against_pop(self):\n #self.normalize() # Normalize before evaluating\n\n self.fitness = [0] * self.population_size # Reset fitness\n evaluations = [0] * self.population_size\n agents = [self.individual_type(), self.individual_type(), self.individual_type(), self.individual_type()]\n for _ in tqdm(range(self.population_size * self.evaluations_per_chromosome)):\n sample = [random.randint(0,self.population_size - 1) for _ in range(4)] # sample random chromosomes\n for i in range(len(sample)):\n evaluations[sample[i]] += 1\n agents[i].load_chromosome(self.population[sample[i]]) # Load chromosome of random sample [i] into agent[i]\n win_index = evaluate_agents(agents)\n self.fitness[sample[win_index]] += 1 # increment fitness of 
winner -> amount of wins is obtained\n for i, fitness in enumerate(self.fitness):\n self.fitness[i] = fitness / evaluations[i] # Divide fitness with amount of times chromosome have been evaluated -> win rate is obtained.", "def fitness(self, chromosome):\n node_map = self.network.node_map\n link_dict = self.network.get_link_dictionary()\n gene = 0\n for node in node_map:\n for demand in node_map[node].demand_list:\n for _ in range(demand.lambda_number):\n for link in demand.admissiblePaths[chromosome[gene]].links:\n link_dict[link] += 1\n gene += 1\n if gene >= self.chromosome_length:\n break\n\n f_sum = 0\n err = 0\n for l in link_dict:\n f_sum += link_dict[l]\n if int(link_dict[l]) > self.network.link_capacity:\n err += int(link_dict[l]) - self.network.link_capacity\n\n return self.penalty2 * f_sum + self.penalty1 * err", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def exercise9():\n#seed here is 8\n np.random.seed(seed=8)\n#Generate random numbers from 2 dice for 1000o trials. ranges from [0,5]\n#diceThrows = np.random.randint(6, size=(1000,2))\n #print(diceThrows.shape)\n for i in range(1,11):\n count=0\n diceThrows = np.random.randint(6, size=(1000, 2))\n for x,y in diceThrows:\n if x == 5 and y == 5: #double sixes\n count = count + 1\n\n print(\"Trial \", i, \"= \", count/1000)", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def fitness_tsp(chrom, tsp):\n\n fit = 0\n for i in range(len(chrom)-1):\n cost = tsp.get_weight(chrom[i], chrom[i+1])\n fit += cost\n return fit", "def N_genes_in_dataset(self):\n return len(self.all_genes_in_dataset)", "def __calculate_fitness(self):\n \"\"\"\n Equation:\n f(fenotype) = cos(fenotype) * fenotype + 2\n \"\"\"\n self.fitness = math.cos(self.fenotype) * self.fenotype + 2", "def rand_order_size():\n return poisson(2.0) + 1", "def evaluate(self):\r\n\r\n fitness = 0\r\n\r\n for i in range(len(self.representation)):\r\n # Calculates full distance, including from last city\r\n # to first, to terminate the trip\r\n fitness += distance_matrix[self.representation[i - 1]][self.representation[i]]\r\n\r\n return int(fitness)", "def generator_count(self, gen):\n if len(gen) != 1 or gen.array_form[0][1] < 0:\n raise ValueError(\"gen must be a generator\")\n s = gen.array_form[0]\n return s[1]*sum([abs(i[1]) for i in self.array_form if i[0] == s[0]])", "def sample_fitness(individual):\n\n return individual.dataframe.sample(frac=0.1, random_state=0).mean().mean()", "def calc_fitness(xi, Y, Yhat, c=2):\n\n p = sum(xi) # Number of selected parameters\n n = len(Y) # Sample size\n numer = ((Y - Yhat) ** 2).sum() / n # Mean square error\n pcn = p * (c / n)\n if pcn >= 1:\n return 1000\n denom = (1 - pcn) ** 2\n theFitness = numer / denom\n return theFitness", "def roulette(total_fitness, population):\n pie_slice = random.random() * total_fitness\n fitness_so_far = 0.0\n\n for i in range(len(population)):\n fitness_so_far += population[i].fitness\n\n if fitness_so_far >= pie_slice:\n return population[i]\n return None", "def count(self, value=None):\r\n\t\t_set = list(set(self.sample))\r\n\t\tif value == None: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}\r\n\t\telse:\r\n\t\t\ttry: return {_set[i]: self.sample.count(_set[i]) for i in range(len(_set))}[value]\r\n\t\t\texcept: return 0", "def roll(n, val):\n\tcount = 0\n\tfor i in range(n):\n\t\troll = 
randint(1, 6) + randint(1, 6)\n\t\tif roll == val: count += 1\n\treturn count" ]
[ "0.6422853", "0.63974106", "0.62456286", "0.6235945", "0.612314", "0.60374177", "0.6036776", "0.59866303", "0.5981496", "0.5973722", "0.5966571", "0.59424984", "0.5921845", "0.5910393", "0.5909234", "0.5907262", "0.59053224", "0.5892766", "0.58769244", "0.5876505", "0.5841656", "0.5802337", "0.57970726", "0.57953894", "0.57186586", "0.5700254", "0.56901973", "0.5672386", "0.56572104", "0.56548774", "0.56309247", "0.5621536", "0.5620949", "0.56013906", "0.5582272", "0.55774623", "0.55752265", "0.55741495", "0.5573163", "0.55723476", "0.5556347", "0.5544834", "0.5534988", "0.5532891", "0.5504438", "0.5499316", "0.5489958", "0.5469764", "0.54607695", "0.5452603", "0.5452603", "0.54461235", "0.5439104", "0.54322755", "0.5414252", "0.54089063", "0.54088014", "0.54070735", "0.53931934", "0.53840154", "0.5370562", "0.5366789", "0.5360586", "0.53485197", "0.5346353", "0.5343981", "0.5337661", "0.5328335", "0.53265625", "0.5315097", "0.53135514", "0.5309122", "0.5306614", "0.5306614", "0.53049487", "0.5298409", "0.5274899", "0.5272365", "0.5264477", "0.5249768", "0.5230549", "0.52067876", "0.5204605", "0.5202724", "0.51999813", "0.5195485", "0.5189376", "0.51740366", "0.5163486", "0.51626295", "0.51621956", "0.5157364", "0.51559234", "0.51535124", "0.515047", "0.51459867", "0.5145635", "0.51441014", "0.513522", "0.51348656" ]
0.7105341
0
4. SEED DISPERSAL PHASE. Seed dispersal is the movement or transport of seeds away from the parent tree. Seeds are randomly distributed over the dimensional search space by random numbers drawn from either a normal distribution or a uniform distribution. In the biology of dispersal, a dispersal vector is "an agent transporting seeds or other dispersal units". Dispersal vectors may include biotic factors, such as animals, or abiotic factors, such as the wind or the ocean. Seeding can either be local (within a dispersal spread around the mother tree) or global, in which case a dispersal vector transports a seed over a long distance away from the mother tree's location to colonize new areas of the forest, i.e. the exploration phase.
def disperse(self, tree, dtype="normal", n=2):
    # computes "dispersion spread" for current generation
    spread = self._dispersion_spread(n)

    # creates mother tree's offsprings known as seedlings
    for _ in range(tree.seeds):

        # creates new seedling by mutation of mother tree's DNA
        seedling = self._mutate(tree, spread, dtype)

        # checks boundaries of optimal problem
        self._check(seedling.vector)

        # evaluates fitness
        fitness = self.evaluate(seedling.vector)

        # adds new seedling to forest
        self.seedlings.append((fitness, seedling))
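A minimal sketch of what the dispersal draw described in the query could look like, assuming the seedling position is obtained by perturbing each coordinate of the mother tree's vector with offsets drawn from a normal distribution (local seeding) or a uniform distribution, scaled by the current dispersion spread. The helper name and signature below are assumptions for illustration and are not the _mutate implementation from the source.

import numpy as np

def disperse_vector(mother_vector, spread, dtype="normal", rng=None):
    # Draw a seedling position by perturbing each coordinate of the mother tree's vector.
    rng = rng or np.random.default_rng()
    mother_vector = np.asarray(mother_vector, dtype=float)
    if dtype == "normal":
        # local seeding: offsets concentrated around the mother tree
        offsets = rng.normal(loc=0.0, scale=spread, size=mother_vector.shape)
    else:
        # uniform seeding: offsets spread evenly within +/- spread of the mother tree
        offsets = rng.uniform(low=-spread, high=spread, size=mother_vector.shape)
    return mother_vector + offsets

# Illustrative usage: a 3-dimensional mother tree with dispersion spread 0.5.
seedling_position = disperse_vector([1.0, -2.0, 0.3], spread=0.5, dtype="uniform")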
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deng_random(seeds):\n\n x = seeds[0]\n y = seeds[1]\n\n while 0<1:\n temp = x\n x = (x + y) % 1.0\n y = temp\n yield x", "def seed():", "def seed():\n pass", "def seed():\n pass", "def _seed(self, seed):\n self.world.seed(seed)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def create_diseasestate_column(num_ppl, seed=1):\n # initialize all 0s array\n initial_diseasestate = np.zeros(num_ppl)\n # infect exactly `seed` number of elements\n initial_diseasestate[np.random.choice(num_ppl, seed)] = 1\n # return infection state\n return initial_diseasestate", "def seed(self, seed=None):\n raise self.gym.seed(seed)", "def initialize_randomness(seed):", "def Randomize(seed=None):\n random.seed()", "def seed(self, evolver: 'Evolver'):\r\n settings = evolver.settings\r\n\r\n self.seed_points_ind = 0\r\n self.sweep_len = 0\r\n if self.evolve_param.categorical:\r\n self.seed_points = self.evolve_param.sample(\r\n evolver.approach_params[self.evolve_param.name], settings.seed_points)\r\n self.sweep_pts = None\r\n self.sweep = dict()\r\n else:\r\n self.seed_points = self.evolve_param.get_seeds(\r\n evolver.approach_params[self.evolve_param.name], settings.seed_points)\r\n self.sweep_pts = np.zeros(\r\n settings.seed_points + settings.salient_points, dtype=self.seed_points.dtype)\r\n self.sweep = np.zeros(self.sweep_pts.shape[0], dtype='float64')\r\n\r\n self.new_sensitives = evolver.sensitive_params.copy()", "def random():\n np.random.seed(1939)", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def seeded_vector(self, seed_string):\n # Note: built-in hash() may vary by Python version or even (in Py3.x) per launch\n once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)\n return (once.rand(self.vector_size) - 0.5) / self.vector_size", "def seed(self, seed: Optional[int]) -> None:\n ...", "def set_global_seeds(seed):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def random_seed(seed):\n state = RandomState()\n random.seed(seed) # alter state\n np.random.seed(seed)\n torch.manual_seed(seed)\n yield\n state.set_global()", "def seed(seed: int) -> None:\n ...", "def Randomize(self, seed):\n return _hypre.HypreParVector_Randomize(self, seed)", "def seed(self, seed: int) -> None:\n self.game.set_seed(seed)", "def test_with_predefined_dist(self, seed):\n dim = Dimension(\"yolo\", dists.norm, 0.9)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert dists.norm.rvs(0.9) == samples[0]", "def __init__(self, seed):\n\n seed = params.integer(seed, from_=0, to=2 ** 32 - 1)\n self._random = np.random.RandomState(seed=seed)", "def set_seeds(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n return None", "def 
random():\n np.random.seed(0)", "def seed(self, seed=None):\n raise NotImplementedError()", "def seed(self, seed=None):\n raise NotImplementedError()", "def set_seed(self, seed: int):\n self.__sim.seed(seed)", "def set_seed(self, seed):\n self.seed = seed", "def prepare_simulation(master_seed, n_populations):\n nest.ResetKernel()\n # set global kernel parameters\n nest.SetKernelStatus(\n {\"communicate_allgather\": sim.allgather,\n \"overwrite_files\": sim.overwrite_existing_files,\n \"resolution\": sim.dt,\n \"total_num_virtual_procs\": sim.n_vp})\n if sim.to_text_file:\n nest.SetKernelStatus({\"data_path\": data_path_test})\n \n # Set random seeds\n \n # PYNEST\n #nest.sli_run('0 << /rngs [%i %i] Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map >> SetStatus'%(\n # master_seed, master_seed + sim.n_vp - 1))\n #nest.SetKernelStatus({\"rng_seeds\" : range(master_seed, master_seed + sim.n_vp)})\n #nest.sli_run('0 << /grng rngdict/gsl_mt19937 :: %i CreateRNG >> SetStatus'%(master_seed + sim.n_vp))\n #nest.SetKernelStatus({\"grng_seed\" : master_seed + sim.n_vp})\n #pyrngs = [np.random.RandomState(s) for s in \n # range(master_seed + sim.n_vp + 1, master_seed + 2 * sim.n_vp + 1)]\n\n # SLI VERSION\n sli_str = \"0 << \\n\"\n #sli_str += \"/rngs %i [0 %i 1 sub] add Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map\\n\"%(master_seed, sim.n_vp) # local RNG, seeded\n #sli_str += \"/grng rngdict/gsl_mt19937 :: %i %i add CreateRNG\\n\"%(master_seed, sim.n_vp) # global RNG\n sli_str += \"/rng_seeds %i [0 %i 1 sub] add Range\\n\"%(master_seed, sim.n_vp) # local RNG seeds\n sli_str += \"/grng_seed %i %i add\\n\"%(master_seed, sim.n_vp) # global RNG seed\n sli_str += \">> SetStatus\"\n nest.sli_run(sli_str)\n sli_str2 = \"/script_rngs [%i]\\n\"%sim.n_vp\n sli_str2 += \"{%i add rngdict /gsl_mt19937 get exch CreateRNG } Table def\\n\"%(master_seed + sim.n_vp)\n sli_str2 += \"/normal_rdvs script_rngs { rdevdict /normal get CreateRDV } Map def\"\n nest.sli_run(sli_str2)\n pyrngs = None\n return pyrngs", "def generate_db(seed, c, e, s, st, sr, v):\n\n r.seed(seed) #initialize the random number generator with the seed\n sales_db = {} #make an empty dictionary\n\n for i in range(0,v):\n #populate a single record\n new_rec = {'city': r.sample(c, 1)[0], #draw a city\n 'employee' : r.sample(e, 1)[0], #draw a person\n 'sales' : round(r.triangular(s[0],s[1])), #draw a sale value\n 'stay' : round(r.uniform(st[0],st[1])) #how long did it take to close the deal\n }\n if r.random() > sr: #deal fell through\n new_rec['sales'] = 0\n\n #add record to db\n sales_db[i] = new_rec\n\n return sales_db", "def simulate(self):\n self._t = self._t + 1\n if self._t == self._cycle:\n # End of a season, start of the next one. 
Year is also cyclic that is WINTER -> SPRING.\n self._t = 0\n self._season = self._season.next()\n\n # When the ammount of newly produced food in a cell is over and the cell can seed we\n # randomly choose another spot where some random ammount of newly produced food should\n # be stored.\n for i in range(self._height):\n for j in range(self._width):\n if self._env[i][j].get_newly() == 0 and not self._seeded[i][j]:\n # if the cell become empty just now seed in once in a randomn cell on the grid.\n self._seeded[i][j] = True\n cap = self._height + self._width\n while cap > 0:\n seedi = random.randint(0, self._height - 1)\n seedj = random.randint(0, self._width - 1)\n\n production_cap = self._food_per_season[self._season.value]\n\n production_cap -= self._env[seedi][seedj].get_newly()\n\n if production_cap > 0:\n seed_amount = random.randint(1, production_cap)\n self._env[seedi][seedj].produce(seed_amount)\n self._seeded[seedi][seedj] = False\n break\n\n cap = cap - 1", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_seed\r\n #backport\r\n #seed = self.default_seed if seed is None else seed\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.random_streams.random_state_variables:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r_container = self.memo[old_r].value\r\n if old_r_container.value is None:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value = numpy.random.RandomState(\r\n int(old_r_seed))\r\n else:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value.seed(int(old_r_seed))", "def set_seed(self,seed):\r\n if seed is None:\r\n warnings.warn(\r\n \"Initializing player with seed from Axelrod module random number generator. 
\"\r\n \"Results may not be seed reproducible.\")\r\n self._seed = _module_random.random_seed_int()\r\n else:\r\n self._seed = seed\r\n self._random = RandomGenerator(seed=self._seed)\r\n self.base._random = self._random\r\n self.trust._random = self._random\r\n self.conviction._random = self._random\r\n \r\n self.generator = torch.Generator()\r\n self.generator.manual_seed(int(seed))", "def random_seed(self) -> None:\n self.seed = random.SeedSequence().entropy", "def set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n logging.info(f\"Set simulation random seed to: {seed}\")", "def setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def set_seed():\n np.random.seed(1423)", "def set_seeds(seed, env=None):\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n random.seed(seed)\n if env is not None:\n env.seed(seed)", "def set_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)", "def seed(seed_value=None):\n np.random.seed(seed_value)", "def init_seed(seed=None):\n if seed is None:\n seed = int(time.time())\n\n LOGGER.info(\"Using seed=%d\", seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def test_seed_coverage(self):\n # Define entry point\n ENTRY = 0x40056d\n\n # We start the execution with a random value located at 0x1000.\n lastInput = list()\n worklist = list([{0x1000: 1}])\n\n while worklist:\n # Take the first seed\n seed = worklist[0]\n\n # Symbolize inputs\n self.symbolize_inputs(seed)\n\n # Init context memory\n self.init_ctxt()\n\n # Emulate\n self.seed_emulate(ENTRY)\n\n lastInput += [dict(seed)]\n del worklist[0]\n\n newInputs = self.new_inputs()\n for inputs in newInputs:\n if inputs not in lastInput and inputs not in worklist:\n worklist += [dict(inputs)]\n\n self.assertIn({4096: 101,\n 4097: 108,\n 4098: 105,\n 4099: 116,\n 4100: 101}, lastInput)", "def set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)", "def set_all_seeds(seed):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def set_seed(seed: int):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def clairvoyant_agent(self, seeds): \n rewards = []\n for seed in seeds:\n self.env.seed(seed)\n self.env.reset()\n\n # store the initial generation levels\n initial_action = [self.env.state.generator_1_level, self.env.state.generator_2_level]\n\n while not self.env.state.is_done():\n # repeat constant action, just in order to get to the end\n self.env.step(initial_action)\n # read realised demand\n realised_demand = np.diagonal(np.array(env.state.agent_predictions_all))\n # optimise the run cost against (clairvoyant) realised demand, pretending to run at t=-1\n min_cost = agent.full_solution([-1] + initial_action + list(realised_demand))\n # collect (negative) cost\n rewards.append(- min_cost)\n return np.mean(rewards)", "def seed(self, seed):\n\n random.seed(seed)\n np.random.seed(seed)", "def random_seed(seed_value: int) -> None:\r\n np.random.seed(seed_value) # cpu vars\r\n torch.manual_seed(seed_value) # cpu vars\r\n 
random.seed(seed_value) # Python\r\n if torch.cuda.is_available():\r\n torch.cuda.manual_seed(seed_value)\r\n torch.cuda.manual_seed_all(seed_value) # gpu vars\r\n torch.backends.cudnn.deterministic = True # needed\r\n torch.backends.cudnn.benchmark = False", "def set_random_seeds(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n logging.debug(\"SystemLog: Set random seed {}\".format(seed))", "def determinize_random_state():\n # force initialization of cuda context for reproducibility\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.enabled = True\n # NOTE it's important to set random seed at the very beginning.\n random_seed = 777\n logger.debug(f'Seeding random state with {random_seed}')\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)", "def test_exact_supercontrolled_decompose_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n self.check_exact_decomposition(random_unitary(4, seed=state).data, decomposer)", "def set_seed(seed: int = None):\n\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(0)", "def set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Maybe different op seeds(for dropout) for different procs is better. By:\n # `paddle.seed(args.seed + paddle.distributed.get_rank())`\n paddle.seed(args.seed)", "def run_seed(self, mode):\n # Clear data from tables\n # clear_data()\n if mode == MODE_CLEAR:\n return\n # industry = create_industy()\n # structure_type, structure_type1 = create_structure_type()\n # structure, structure2 = create_structure(structure_type)\n # stock = create_stock(industry, structure)\n # create_price_list(stock)\n # create_news(stock, structure, structure2)\n # create_analysis(structure)\n # create_market_indices()\n create_section_group()", "def _generate_raw_environments(self, num, seed):", "def set_seed(seed: int):\n np.random.seed(seed)\n torch.manual_seed(seed)", "def set_random_seed(seed):\n\n # Sets the seed for the inbuilt Python functions.\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n\n # Sets the seed for the NumPy library.\n np.random.seed(seed)\n\n # Sets the seed for the PyTorch library.\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True", "def set_seed(seeds, simulation_number):\n\n if seeds:\n seed = seeds[simulation_number]\n else:\n seed = None\n np.random.seed(seed)", "def randomize(self, seed_density):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (rand.random() <= seed_density):\r\n self.cells[x][y] = 1", "def set_seed(cls, seed: Any) -> None:\n cls.rand = Random(seed)", "def reproducible(seed: int = 0) -> None:\n\n os.environ[\"PYTHONHASHSEED\"] = \"0\"\n\n np.random.seed(seed)\n python_random.seed(seed)\n tf.random.set_seed(seed)", "def _seed(self, seed=None):\n self.rng, seed = seeding.np_random(seed)\n return [seed]", "def set_random_seed(self, seed):\n np.random.seed(seed)\n return", "def set_random_seed(self, seed):\n np.random.seed(seed)\n return", "def random_test(self, source):\r\n ret = 1\r\n for seed in range(1, 40):\r\n if source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x)**2+10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 
2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}) != \\\r\n source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x) ** 2 + 10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}):\r\n ret = 0\r\n if ret == 0:\r\n if self.verbosity > 0:\r\n print(\"ERROR: Random seed non functional, results cannot be replicated.\")\r\n return 0\r\n else:\r\n if self.verbosity > 1:\r\n print(\"Random seed functional, results replicable if a seed is used.\")\r\n return 1", "def test_single_game_seed_works(self):\n sim = ss.Simulation(seed=23)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=23)\n game2 = sim.single_game()\n assert game1 == game2, 'Your seed in Simulation class is not working.'", "def set_random_seed(seed: int):\n return (\n np.random.seed(seed=seed),\n torch.manual_seed(seed=seed),\n random.seed(seed),\n )", "def wood_drum_env(N, sr):\n ## TODO: Fill this in\n return np.zeros(N)", "def seed(self, seed=None):\n # to have a different environment at each time (resolve python random problem)\n self.np_random, seed1 = seeding.np_random(seed)\n seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31\n return [seed1, seed2]", "def seed(seed: int, deterministic: Optional[Union[str, int]] = None) -> None:\n max_val = np.iinfo(np.uint32).max\n min_val = np.iinfo(np.uint32).min\n if seed < min_val or seed > max_val:\n raise ValueError(\n f\"Invalid seed value provided: {seed}. Value must be in the range [{min_val}, {max_val}]\"\n )\n _log.debug(f\"Setting seed to {seed}\")\n\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n if deterministic is not None:\n _log.debug(f\"Setting deterministic debug mode to {deterministic}\")\n torch.set_deterministic_debug_mode(deterministic)\n deterministic_debug_mode = torch.get_deterministic_debug_mode()\n if deterministic_debug_mode == 0:\n _log.debug(\"Disabling cuDNN deterministic mode\")\n torch.backends.cudnn.deterministic = False\n torch.backends.cudnn.benchmark = True\n else:\n _log.debug(\"Enabling cuDNN deterministic mode\")\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def set_random_seed():\n random.seed(DBG_RAND_SEED)\n numpy.random.seed(DBG_RAND_SEED)", "def _set_seed(self) -> None:\r\n random.seed(self.seed)\r\n np.random.seed(self.seed)", "def get_seeds():\n num_clusters = {'Food': 5, 'Decor': 5, 'Service': 5, \n 'Overall': 5, 'None': 10}\n\n e, y = get_labeled_data('./data/labeled_attributes/develop_set')\n clusters = [set() for i in range(len(category_mapping))]\n for i in range(len(e)):\n clusters[y[i]].add(e[i])\n\n # In order to pick seeds that cover each category well, cluster the labeled \n # attrs in each category and take the centers of those clusters as seeds\n seeds = {}\n for i in range(len(clusters)):\n category = inverse_category_mapping[i]\n c = clusters[i]\n l = list(c)\n initial = [set([l[j]]) for j in range(num_clusters[category])]\n subclusters = iterative_cluster(initial, c, average_distance, 40)\n seeds[category] = [s[0] for s in subclusters]\n\n return seeds", "def choose_new_seed(self):\r\n if self.rerandomize == 'never':\r\n self.seed = 1\r\n elif self.rerandomize == \"per_student\" and hasattr(self.runtime, 'seed'):\r\n # see comment on randomization_bin\r\n self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))\r\n else:\r\n self.seed = struct.unpack('i', os.urandom(4))[0]\r\n\r\n # So that sandboxed code execution can be cached, but still have an interesting\r\n # number of 
possibilities, cap the number of different random seeds.\r\n self.seed %= MAX_RANDOMIZATION_BINS", "def set_seed(seed=0):\n # Python std lib random seed\n random.seed(seed)\n # Numpy, tensorflow, torch\n torch.manual_seed(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n # Additional seeds potentially required when using a gpu\n # (see https://www.youtube.com/watch?v=TB07_mUMt0U&t=1804s)\n os.environ['TF_CUDNN_DETERMINISTIC'] = 'true'\n os.environ['TF_DETERMINISTIC_OPS'] = 'true'\n os.environ['PYTHONHASHSEED'] = str(seed)", "def testDistributionOfStatelessRandomNormal(self, dtype, seed):\n with self.session() as sess, self.test_scope():\n seed_t = array_ops.placeholder(dtypes.int32, shape=[2])\n n = 1000\n x = stateless.stateless_random_normal(shape=[n], seed=seed_t, dtype=dtype)\n y = sess.run(x, {seed_t: seed})\n # The constant 2.492 is the 5% critical value for the Anderson-Darling\n # test where the mean and variance are known. This test is probabilistic\n # so to avoid flakiness the seed is fixed.\n self.assertLess(random_test_util.anderson_darling(y.astype(float)), 2.492)", "def set_seed(seed=42):\n np.random.seed(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)", "def seed(self, seed=None):\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n return [seed]", "def set_seeds(seed=42):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False # for faster training, but not deterministic", "def test_exact_supercontrolled_decompose_phase_0_use_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(0, 0, 0) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=0)", "def seed_everything(seed):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def seed_all(seed):\n\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def build_data(seed):\n rs = np.random.RandomState(seed)\n\n def y(x):\n \"\"\" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 \"\"\"\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4\n\n xtrain = rs.rand(10000, 4)\n xtest = rs.rand(1000, 4)\n ytrain = y(xtrain) + rs.rand(10000) / 10\n ytest = y(xtest) + rs.rand(1000) / 10\n return xtrain, xtest, ytrain, ytest", "def initialize(self, seed=None):\r\n self.seed(seed)", "def make_random_supercontrolled_decomposer(self, seed):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = state.random() * np.pi / 4\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = 
TwoQubitBasisDecomposer(UnitaryGate(basis_unitary))\n return decomposer", "def _use_seed(seed):\n torch_rng_state = torch.random.get_rng_state()\n torch.manual_seed(seed)\n yield\n torch.random.set_rng_state(torch_rng_state)", "def seed_random():\n random.seed(0)", "def __init__(self, num_instances: int, seed: float = 42):\n self._num_instances = num_instances\n self._seed = seed\n # apply seed\n random.seed(self._seed)\n np.random.seed(self._seed)", "def set_seed(seed):\n assert (type(seed) == int and seed >= 0)\n return params_func(cmd, \"set_seed\", params=[seed])", "def check_seed():\n np.random.seed(1000)\n standard = [\n {0: -3.0, 1: -5.0, 'index': 0},\n {0: -6.0, 1: -8.0, 'index': 1},\n {0: 5.0, 1: -1.0, 'index': 2},\n {0: 1.0, 1: -7.0, 'index': 3},\n {0: -2.0, 1: -3.0, 'index': 4},\n {0: 7.0, 1: 3.0, 'index': 5},\n {0: -4.0, 1: -2.0, 'index': 6},\n {0: 2.0, 1: 6.0, 'index': 7}\n ]\n\n this_machine = create_points(8)\n\n flag = True\n for i in range(8) :\n flag &= this_machine[i][0] == standard[i][0] \n flag &= this_machine[i][1] == standard[i][1] \n flag &= this_machine[i][\"index\"] == i\n \n if not flag :\n print(\"\"\"\n The Python installation on this machine is odd: it appears to\n use a non-standard random number generator -- run \n this script on the machines in the Otter lab instead.\n If that fails too, send an email to ag0015@surrey.ac.uk.\n \"\"\")\n print (\"You got these test points:\", this_machine)\n print (\"You should have got:\", standard)\n exit(-1)\n else :\n print (\"Check passed\")", "def seed(self, seed=None):\n if seed is not None:\n self._rng.seed(seed)", "def test_repeated_simuations_with_fixed_seed(self):\n random.seed(175203)\n expected_results = {-1: 5, 0: 36, 1: 43, 2: 16}\n self._setup_random_gen([0.01, 0.3, 0.58, 0.1, 0.01], [-1, 0, 1, 2, 3])\n\n simulation_results = Counter()\n for _ in range(100):\n simulation_results[self._random_gen.next_num()] += 1\n\n self.assertDictEqual(simulation_results, expected_results)", "def test_dense_transmission(self):\n # generate pseudo-random test case\n f = 0.5\n\n np.random.seed(6564)\n self.syn_dense.W = np.random.randn(self.M, self.N)\n self.syn_dense.f_nmda = f\n\n sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)\n sim.run(self.t_max)\n\n expected_ampa = np.zeros(self.M)\n expected_nmda = np.zeros(self.M)\n v = np.zeros(self.M)\n\n for i in xrange(len(self.G.spike_table)):\n v += self.T.v_step\n effect = np.dot(self.syn_dense.W, self.G.spike_table[i])\n expected_ampa += effect\n expected_nmda += effect/(1.0 + self.syn_1t1.mg/3.57*\n np.exp(-v/16.13))\n\n self.assertTrue(np.allclose((1-f)*expected_ampa, self.T.i_ampa))\n self.assertTrue(np.allclose(f*expected_nmda, self.T.i_nmda))", "def main():\n args = get_args()\n seed = args.seed\n player_hits = args.player_hits\n dealer_hits = args.dealer_hits\n\n random.seed(seed)\n #random.Random(seed_arg)\n\n suites = list('♥♠♣♦')\n\n # numbs = list(range(2,11))\n # face_cards = list('JQKA')\n #\n # numbs_cards = [str(s) for s in numbs]\n # numbs_cards += face_cards\n\n #create cards and values\n cards = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n\n #create dict of card values\n values = dict(zip(cards,values))\n\n #produce the deck of chards\n deck = list(product(suites, cards))\n\n #sort the deck of cards\n deck.sort()\n\n #shuffle deck of cards\n random.shuffle(deck)\n #print(deck)\n\n dealer = []\n player = []\n\n cards_dealt = 4\n\n while cards_dealt:\n 
player.append(deck.pop())\n cards_dealt -= 1\n\n dealer.append(deck.pop())\n cards_dealt -= 1\n\n if player_hits is True:\n player.append(deck.pop())\n\n if dealer_hits is True:\n dealer.append(deck.pop())\n\n player_sum = sum([values[t[1]] for t in player])\n\n dealer_sum = sum([values[t[1]] for t in dealer])\n\n dealer_string = ' '.join([''.join(x) for x in dealer])\n\n player_string = ' '.join([''.join(x) for x in player])\n\n #print out results:\n print('D [{:>2}]: {}'.format(dealer_sum, dealer_string ))\n print('P [{:>2}]: {}'.format(player_sum, player_string ))\n\n #Check if the player has more than 21; if so, print 'Player busts! You lose, loser!' and exit(0)\n if player_sum > 21:\n print('Player busts! You lose, loser!')\n sys.exit(0)\n\n #Check if the dealer has more than 21; if so, print 'Dealer busts.' and exit(0)\n if dealer_sum > 21:\n print('Dealer busts.')\n sys.exit(0)\n\n #Check if the player has exactly 21; if so, print 'Player wins. You probably cheated.' and exit(0)\n if player_sum == 21:\n print('Player wins. You probably cheated.')\n sys.exit(0)\n\n #Check if the dealer has exactly 21; if so, print 'Dealer wins!' and exit(0)\n if dealer_sum == 21:\n print('Dealer wins!')\n sys.exit(0)\n\n #check if dealer and player should hiting\n if dealer_sum < 18:\n print('Dealer should hit.')\n if player_sum < 18:\n print('Player should hit.')", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def generate_random_walker():\n # must have seeds that generate known problems\n must_have_seeds = [112, 308, 393]\n for seed in must_have_seeds:\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections\n while True:\n seed = random.randint(0, 2**10)\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections", "def stellar_seed(self) -> str:\n return kin_utils.encode_check('seed', bytes(self._signing_key)).decode()", "def run_seeds(self, nbrun):\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_RUNNING_SEEDS)\n self._notify_listeners_start_operation(listener.OPERATION_RUN_SEEDS)\n rsol = self.agent.run_seeds(nbrun)\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n return rsol" ]
[ "0.62226486", "0.6216759", "0.6068667", "0.6068667", "0.60506463", "0.6025185", "0.5997389", "0.5925686", "0.59127784", "0.59025425", "0.5878437", "0.5868725", "0.5820426", "0.57940364", "0.5784051", "0.5725177", "0.5720389", "0.56988484", "0.56769013", "0.5634516", "0.56209517", "0.5619261", "0.56097734", "0.5609323", "0.5607152", "0.5599526", "0.55857503", "0.55857503", "0.5580477", "0.55753857", "0.5546274", "0.5543842", "0.5543338", "0.5535447", "0.5526872", "0.5505105", "0.5502824", "0.5490972", "0.5485949", "0.5472872", "0.54627186", "0.54405016", "0.5439252", "0.5425762", "0.5423798", "0.5420863", "0.5414301", "0.5413757", "0.5412845", "0.54068464", "0.5399127", "0.5395841", "0.53939515", "0.53925616", "0.53723407", "0.5372052", "0.53624046", "0.5359407", "0.5354893", "0.5342235", "0.5336355", "0.53296715", "0.53173894", "0.5314929", "0.53121895", "0.52989405", "0.52989405", "0.5293453", "0.5273945", "0.5272652", "0.52707374", "0.5266673", "0.5260446", "0.525703", "0.52566814", "0.5254908", "0.52432775", "0.5240732", "0.52385324", "0.5237285", "0.5236267", "0.52353066", "0.522926", "0.5227466", "0.52170855", "0.5196053", "0.5194878", "0.51937604", "0.5189567", "0.5188187", "0.5186059", "0.5182943", "0.5181644", "0.5179479", "0.5179295", "0.5177322", "0.5167349", "0.5162739", "0.5161753", "0.51597357", "0.5154218" ]
0.0
-1
Creates a seedling by randomly mutating the DNA of its mother tree. For each seed, a coin toss decides whether a local or global seeding takes place to ensure constant exploration of the search space.
def _mutate(self, tree, spread, dtype):

    # defines wrapper functions
    def uniform(lower, upper):
        """ Draws a random float number from a uniform distribution given by U[lower, upper]. """
        return lower + random.random() * (upper - lower)

    def normal(mean, std):
        """ Draws a random float number from a normal distribution with mean 'mu' and standard deviation 'sigma': N[mu, sigma]. """
        return random.gauss(mean, std)

    # creates a seedling based on the DNA of its mother tree
    new_tree = copy.deepcopy(tree)

    # trade-off between exploitation and exploration
    if (random.random() > self.epsilon):
        # mutates initial solution vector - i.e. local seeding
        for i in range(self.dim):
            if (random.random() < self.mut_proba):
                if (dtype == "normal"):
                    new_tree.vector[i] += normal(0, spread)
                elif (dtype == "uniform"):
                    new_tree.vector[i] += uniform(-1, 1)
                else:
                    raise AttributeError("'dtype' must either be 'normal' or 'uniform'.")
    else:
        # explores new region of the search space - i.e. global seeding
        new_tree = Tree(self.lower, self.upper)

    return new_tree
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seed():", "def generate_random_walker():\n # must have seeds that generate known problems\n must_have_seeds = [112, 308, 393]\n for seed in must_have_seeds:\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections\n while True:\n seed = random.randint(0, 2**10)\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections", "def _seed(self, seed):\n self.world.seed(seed)", "def seed():\n pass", "def seed():\n pass", "def Randomize(seed=None):\n random.seed()", "def seed(self, seed=None):\n raise self.gym.seed(seed)", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_instance_seed\r\n\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.state_updates:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r.set_value(numpy.random.RandomState(int(old_r_seed)),\r\n borrow=True)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_seed\r\n #backport\r\n #seed = self.default_seed if seed is None else seed\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.random_streams.random_state_variables:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r_container = self.memo[old_r].value\r\n if old_r_container.value is None:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value = numpy.random.RandomState(\r\n int(old_r_seed))\r\n else:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value.seed(int(old_r_seed))", "def initialize_randomness(seed):", "def set_global_seeds(seed):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def random_seed(seed):\n state = RandomState()\n random.seed(seed) # alter state\n np.random.seed(seed)\n torch.manual_seed(seed)\n yield\n state.set_global()", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def deng_random(seeds):\n\n x = seeds[0]\n y = seeds[1]\n\n while 0<1:\n temp = x\n x = (x + y) % 1.0\n y = temp\n yield x", "def generate_seeds_and_models(args, synced_model, env):\n np.random.seed()\n random_seed = np.random.randint(2**30)\n two_models = perturb_model(args, synced_model, random_seed, env)\n return random_seed, two_models", "def seed(self, seed: int) -> None:\n self.game.set_seed(seed)", "def test_seed_diff():\n\n skip_if_no_scipy()\n\n rng = np.random.RandomState([1,2,3])\n\n #the number in the argument here is the limit on\n #seed value, and we subtract 1 so it will be\n #possible to add 1 to it for the second MND\n seed = rng.randint(2147462579) -1\n\n dim = 3\n\n mu = rng.randn(dim)\n\n rank = dim\n\n X = rng.randn(rank,dim)\n\n cov = 
np.dot(X.T,X)\n\n mnd1 = MND( sigma = cov, mu = mu, seed = seed)\n\n num_samples = 5\n\n rd1 = mnd1.random_design_matrix(num_samples)\n rd1 = function([],rd1)()\n\n mnd2 = MND( sigma = cov, mu = mu, seed = seed + 1)\n\n rd2 = mnd2.random_design_matrix(num_samples)\n rd2 = function([],rd2)()\n\n assert np.any(rd1 != rd2)", "def seed(self, seed=None):\n # to have a different environment at each time (resolve python random problem)\n self.np_random, seed1 = seeding.np_random(seed)\n seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31\n return [seed1, seed2]", "def choose_new_seed(self):\r\n if self.rerandomize == 'never':\r\n self.seed = 1\r\n elif self.rerandomize == \"per_student\" and hasattr(self.runtime, 'seed'):\r\n # see comment on randomization_bin\r\n self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))\r\n else:\r\n self.seed = struct.unpack('i', os.urandom(4))[0]\r\n\r\n # So that sandboxed code execution can be cached, but still have an interesting\r\n # number of possibilities, cap the number of different random seeds.\r\n self.seed %= MAX_RANDOMIZATION_BINS", "def random():\n np.random.seed(1939)", "def seed(self, seed=None):\n raise NotImplementedError()", "def seed(self, seed=None):\n raise NotImplementedError()", "def seed_all_rng(seed=None):\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)", "def _use_seed(seed):\n torch_rng_state = torch.random.get_rng_state()\n torch.manual_seed(seed)\n yield\n torch.random.set_rng_state(torch_rng_state)", "def seed_all(seed):\n\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def set_all_seeds(seed):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def seed_everything(seed):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def seed(self, seed: Optional[int]) -> None:\n ...", "def seed_all_rng(seed=None):\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.set_rng_state(torch.manual_seed(seed).get_state())\n random.seed(seed)", "def set_seed(cls, seed: Any) -> None:\n cls.rand = Random(seed)", "def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed", "def seed_random():\n random.seed(0)", "def set_seed(self, seed=None):\n super().set_seed(seed=seed)\n for t in self.policy_list:\n t.set_seed(self._random.random_seed_int())", "def seed(self, seed=None):\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n return [seed]", "def set_seed(self,seed):\r\n if seed is None:\r\n warnings.warn(\r\n 
\"Initializing player with seed from Axelrod module random number generator. \"\r\n \"Results may not be seed reproducible.\")\r\n self._seed = _module_random.random_seed_int()\r\n else:\r\n self._seed = seed\r\n self._random = RandomGenerator(seed=self._seed)\r\n self.base._random = self._random\r\n self.trust._random = self._random\r\n self.conviction._random = self._random\r\n \r\n self.generator = torch.Generator()\r\n self.generator.manual_seed(int(seed))", "def seed_everything(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.backends.cudnn.deterministic = cudnn_deterministic", "def seed(seed: int) -> None:\n ...", "def _seed(self, seed=None):\n self.rng, seed = seeding.np_random(seed)\n return [seed]", "def random_seed(self) -> None:\n self.seed = random.SeedSequence().entropy", "def set_seeds(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n return None", "def mutate(genome):\n mutated_genome = copy.deepcopy(genome) # make a copy of the DNA to mutate\n seed = random.randint(0,3)\n if len(mutated_genome) == 0: seed = 0\n if seed == 0:\n mutate_chromosome(mutated_genome)\n elif seed == 1:\n mutate_point(mutated_genome)\n elif seed == 2:\n mutate_color(mutated_genome)\n else: #seed ==3:\n mutate_opacity(mutated_genome)\n return mutated_genome", "def seed_rng(self, seed: int | Sequence[int] | None) -> None:\n super().seed_rng(seed)\n self.rng = numpy.random.RandomState(seed)", "def seed(self, seed=None):\n if seed is not None:\n self._rng.seed(seed)", "def seed(self, seed):\n\n random.seed(seed)\n np.random.seed(seed)", "def random():\n np.random.seed(0)", "def generate_world(world_seed, biome_min, biome_max, w, h):\n\n while True:\n\n try:\n\n # Set the initial seed for the random module (random.seed())\n seed(world_seed)\n\n # Create a blank map (2D list filled with '0' strings\n world = [[0 for y in range(h)] for x in range(w)]\n # Generates the random values for the terrain construction\n terrain = [randrange(20) + 40 for _ in range(w)]\n\n #Empty biome map\n biomes = []\n\n #Generates biomes\n for __ in range(w//biome_min):\n\n #Biome at cursor\n biome_select = choice(list(biome_data))\n\n #Biomes size\n for _ in range(randint(biome_min, biome_max)):\n biomes.append(biome_select)\n\n #World size met\n if len(biomes) >= w:\n biomes = biomes[:w] #Truncate selection\n break\n\n\n # ----- Construct the Terrain\n # Counter that changes dynamically to check through all blocks in the terrain list\n cur_pos = 0\n # Runs through all the generated numbers in a while loop\n while cur_pos < w:\n\n # print(\".\", end=\"\")\n\n # Check to see if terrain gap is too large\n\n if abs(terrain[cur_pos] - terrain[cur_pos - 1]) > biome_data[str(biomes[cur_pos])][\"maxh\"]: # if terrain gap is larger than threshhold (too big)\n\n for n in range(randint(biome_data[str(str(biomes[cur_pos]))][\"minx\"], biome_data[str(str(biomes[cur_pos]))][\"maxx\"])):\n # Insert a new value into the terrain list between the values that are too far apart\n terrain.insert(cur_pos, (terrain[cur_pos] + terrain[cur_pos - 1]) // 2)\n\n else: # Difference between the two blocks is not too big\n\n # Check next block\n cur_pos += 1\n\n # ----- Transfer Terrain To Empty World\n # Run through every space in the empty world\n for x in range(len(world)): # runs through each level\n for y in range(len(world[x])): # runs 
through each individual space\n\n # Generates structures\n if y > terrain[x]:\n\n #Top layer\n if y - terrain[x] == 1:\n\n #Sets the layer with block specified in biome config\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"top\"]]\n\n if randint(0, 10) == 0 and x + 10 < w:\n world = generate_structure(x, y - 1, world, choice(biome_data[biomes[x]][\"structure\"]))\n\n #Middle layer\n elif y - terrain[x] < randint(3, 8):\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"middle\"]]\n\n #Base\n else:\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"lower\"]]\n\n #Generate ores\n # Coal\n if 10 + terrain[x] > y > 5 + terrain[x] and randint(0, 200) == 0:\n for cluster in range(randint(3, 10)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Coal Ore\"]\n\n # Iron\n if 30 + terrain[x] > y > 20 + terrain[x] and randint(0, 200) == 0:\n\n for cluster in range(randint(3, 6)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Iron Ore\"]\n\n # Gold\n if 80 > y > 65 and randint(0, 400) == 0:\n for cluster in range(randint(3, 6)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Gold Ore\"]\n\n # Diamonds\n if 80 > y > 70 and randint(0, 500) == 0:\n for cluster in range(randint(1, 5)):\n world[x + randint(-3, 3)][y + randint(-3, 3)] = block_lookup[\"Diamond Ore\"]\n\n # Bedrock\n if y > 92 or y > 87 and randint(0, 3) == 0:\n world[x][y] = block_lookup[\"Bed Rock\"]\n\n # Last edit, adding extras to the top of the world to prevent problems\n world = [[0] * 40 + x for x in world]\n\n # Return the world object for use\n return np.array(world)\n\n except:\n world_seed += '1'", "def setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True", "def set_seed(self, seed):\n self.seed = seed", "def rngreset(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True", "def __init__(self, seed=__default):\n\n seed = self.__default if seed == 0 else seed\n self.__mt[0] = seed & self.__genm\n for i in range(1, self.__n):\n self.__mt[i] = (self.__genp * self.__mt[i-1]) & self.__genm", "def disperse(self, tree, dtype=\"normal\", n=2):\n\n # computes \"dispersion spread\" for current generation\n spread = self._dispersion_spread(n)\n\n # creates mother tree's offsprings known as seedlings\n for _ in range(tree.seeds):\n\n # creates new seedling by mutation of mother tree's DNA\n seedling = self._mutate(tree, spread, dtype)\n\n # checks boundaries of optimal problem\n self._check(seedling.vector)\n\n # evaluates fitness\n fitness = self.evaluate(seedling.vector)\n\n # adds new seedling to forest\n self.seedlings.append((fitness, seedling))", "def generate_seed():\n global seed\n seed = []\n\n for char_id in range(0, len(printable)):\n while True:\n char_sequence = [printable[randint(0, len(printable)-1)], printable[randint(0, len(printable)-1)]]\n if char_sequence not in seed:\n break\n seed.append(char_sequence)", "def set_all_random_seeds(self, random_seed):\n np.random.seed(random_seed)\n tf.random.set_seed(random_seed)", "def set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Maybe different op seeds(for dropout) for different procs is better. 
By:\n # `paddle.seed(args.seed + paddle.distributed.get_rank())`\n paddle.seed(args.seed)", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def mutate_chromosome(mutated_genome):\n seed = random.randint(0,5)\n if len(mutated_genome) <= 1: seed = 0\n if seed == 0:\n insert_chromosome(mutated_genome)\n elif seed == 1:\n remove_chromosome(mutated_genome)\n elif seed == 2:\n switch_chromosomes(mutated_genome)\n elif seed == 3:\n shuffle_chromosomes(mutated_genome)\n elif seed == 4:\n increment_chromosome(mutated_genome)\n else: #seed == 5:\n decrement_chromosome(mutated_genome)", "def fix_seeds(\n seed=90,\n set_system=True,\n set_torch=True,\n set_torch_cudnn=True):\n # set system seed\n if set_system:\n np.random.seed(seed)\n random.seed(seed)\n\n # set torch seed\n if set_torch:\n torch.manual_seed(seed)\n\n # set torch cudnn backend\n if set_torch_cudnn:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def NewRndSeed(ss):\n ss.RndSeed = int(datetime.now(timezone.utc).timestamp())", "def seed_all(seed: int = 1930):\n print(\"Using Seed Number {}\".format(seed))\n\n os.environ[\"PYTHONHASHSEED\"] = str(\n seed\n ) # set PYTHONHASHSEED env var at fixed value\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.manual_seed(seed) # pytorch (both CPU and CUDA)\n np.random.seed(seed) # for numpy pseudo-random generator\n random.seed(seed) # set fixed value for python built-in pseudo-random generator\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.enabled = False", "def set_seeds(seed=42):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False # for faster training, but not deterministic", "def initialize(self, seed=None):\r\n self.seed(seed)", "def seed (self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def seed(self, seed=None):\n #restore a previous state\n if seed is not None: self._seed(seed)\n \n #now generate a new seed and reseed\n seed = self.generate_seed()\n self._seed(seed)", "def generation(self):\n\n # Re-count and re-collect all root Teams from the main Team\n # population into self.agent_pop\n self.updateAgentPopulation()\n\n # Generate new root Teams as variations of other Teams\n while self.getNumRootTeams() < Trainer.R_SIZE:\n\n # Randomly select parent\n parent = choice(self.agent_pop)\n\n # Copy the parent Team and perform mutation. 
Note that mutation\n # may result in the creation of new root Teams\n child = Team(parent.team)\n self.mutateTeam(child)\n\n # Add new Team to the Team populations\n self.team_pop.append(child)\n\n # Since mutation can theoretically cause new root Teams to be created,\n # run a check for this just out of curiosity.\n if self.getNumRootTeams() != Trainer.R_SIZE:\n print(\"NOTE - Trainer::generation - self.getNumRootTeams() != Trainer.R_SIZE\")\n print(\" self.getNumRootTeams() =\", self.getNumRootTeams())", "def i_seed(seed, flag):\n global randrsl, mm\n\n for i in range(0 , 256):\n mm[i] = 0\n\n m = len(seed)\n\n for i in range(0, 256):\n if i >= m:\n randrsl[i] = 0\n else:\n randrsl[i] = seed[i]\n\n rand_init(flag)", "def seed_all(seed=None):\n if seed is None:\n # pick a random uint32 seed\n seed = random.randint(np.iinfo(np.uint32).min, np.iinfo(np.uint32).max)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.enabled = False\n\n return seed", "def createRandom(protein):\n\n protein.occupied = []\n protein.aminoList = []\n\n for id in range(protein.proteinLength):\n protein.aminoList.append(Amino(id, protein.proteinString[id]))\n\n # Place the first and second amino acid\n if id in {0, 1}:\n thisCoordinate = [0, id]\n if protein.plane == \"3D\":\n thisCoordinate.append(0)\n protein.aminoList[id].coordinate = thisCoordinate\n protein.occupied.append(thisCoordinate)\n else:\n prevCo = protein.aminoList[(id - 1)].coordinate\n posCo = protein.getSurroundCo(prevCo, occupied=False)\n\n # If there are no surrounding coordinates available stop the folding\n if not posCo:\n protein.stability = 0\n return False\n\n coordinate = random.choice(posCo)\n protein.aminoList[id].coordinate = coordinate\n protein.occupied.append(coordinate)\n\n protein.stabilityUpdate(protein.aminoList[id])\n\n return True", "def seed_worker(_worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def mutate_color(mutated_genome):\n seed = random.randint(0,2)\n if seed == 0:\n new_color(mutated_genome)\n elif seed == 1:\n change_color(mutated_genome)\n else: #seed == 2:\n switch_colors(mutated_genome)\n #else: seed == 3: # depricated\n # shuffle_colors(mutated_genome)", "def set_seeds(seed, env=None):\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n random.seed(seed)\n if env is not None:\n env.seed(seed)", "def run(self, seed=None):\n if seed is not None:\n random_seed.set_seed(seed)\n self.reset()", "def initLocalBestChoice(self):\n random.seed()\n return", "def set_global_seed(seed: int):\n\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True", "def prepare_simulation(master_seed, n_populations):\n nest.ResetKernel()\n # set global kernel parameters\n nest.SetKernelStatus(\n {\"communicate_allgather\": sim.allgather,\n \"overwrite_files\": sim.overwrite_existing_files,\n \"resolution\": sim.dt,\n \"total_num_virtual_procs\": sim.n_vp})\n if sim.to_text_file:\n nest.SetKernelStatus({\"data_path\": data_path_test})\n \n # Set random seeds\n \n # PYNEST\n #nest.sli_run('0 
<< /rngs [%i %i] Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map >> SetStatus'%(\n # master_seed, master_seed + sim.n_vp - 1))\n #nest.SetKernelStatus({\"rng_seeds\" : range(master_seed, master_seed + sim.n_vp)})\n #nest.sli_run('0 << /grng rngdict/gsl_mt19937 :: %i CreateRNG >> SetStatus'%(master_seed + sim.n_vp))\n #nest.SetKernelStatus({\"grng_seed\" : master_seed + sim.n_vp})\n #pyrngs = [np.random.RandomState(s) for s in \n # range(master_seed + sim.n_vp + 1, master_seed + 2 * sim.n_vp + 1)]\n\n # SLI VERSION\n sli_str = \"0 << \\n\"\n #sli_str += \"/rngs %i [0 %i 1 sub] add Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map\\n\"%(master_seed, sim.n_vp) # local RNG, seeded\n #sli_str += \"/grng rngdict/gsl_mt19937 :: %i %i add CreateRNG\\n\"%(master_seed, sim.n_vp) # global RNG\n sli_str += \"/rng_seeds %i [0 %i 1 sub] add Range\\n\"%(master_seed, sim.n_vp) # local RNG seeds\n sli_str += \"/grng_seed %i %i add\\n\"%(master_seed, sim.n_vp) # global RNG seed\n sli_str += \">> SetStatus\"\n nest.sli_run(sli_str)\n sli_str2 = \"/script_rngs [%i]\\n\"%sim.n_vp\n sli_str2 += \"{%i add rngdict /gsl_mt19937 get exch CreateRNG } Table def\\n\"%(master_seed + sim.n_vp)\n sli_str2 += \"/normal_rdvs script_rngs { rdevdict /normal get CreateRDV } Map def\"\n nest.sli_run(sli_str2)\n pyrngs = None\n return pyrngs", "def fuse_seed_creation_pass(graph: torch.fx.Graph):\n device_seeds = collections.defaultdict(list)\n for node in graph.nodes:\n if CallFunctionVarArgs(inductor_prims.seed).match(node):\n device_seeds[node.args[0]].append(node)\n\n if not device_seeds:\n return 0\n\n for device, seeds in device_seeds.items():\n with graph.inserting_before(seeds[0]):\n combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))\n with V.fake_mode:\n combined.meta[\"val\"] = torch.empty(\n [len(seeds)], device=device, dtype=torch.int64\n )\n combined.meta[\"tensor_meta\"] = _extract_tensor_metadata(\n combined.meta[\"val\"]\n )\n\n for idx, seed in enumerate(seeds):\n with graph.inserting_before(seed):\n new_seed = graph.call_function(\n inductor_prims.lookup_seed, (combined, idx)\n )\n seed.replace_all_uses_with(new_seed)\n new_seed.meta.update(seed.meta)\n graph.erase_node(seed)\n\n return len(device_seeds)", "def SetRandomSeed(seed):\n global option\n option['random_seed'] = seed", "def test_seed_same():\n\n skip_if_no_scipy()\n\n rng = np.random.RandomState([1,2,3])\n\n #the number in the argument here is the limit on\n #seed value\n seed = rng.randint(2147462579)\n\n dim = 3\n\n mu = rng.randn(dim)\n\n rank = dim\n\n X = rng.randn(rank,dim)\n\n cov = np.dot(X.T,X)\n\n mnd1 = MND( sigma = cov, mu = mu, seed = seed)\n\n num_samples = 5\n\n rd1 = mnd1.random_design_matrix(num_samples)\n rd1 = function([],rd1)()\n\n mnd2 = MND( sigma = cov, mu = mu, seed = seed)\n\n rd2 = mnd2.random_design_matrix(num_samples)\n rd2 = function([],rd2)()\n\n assert np.all(rd1 == rd2)", "def set_seeds(seed: int=42):\n # Set the seed for general torch operations\n torch.manual_seed(seed)\n # Set the seed for CUDA torch operations (ones that happen on the GPU)\n torch.cuda.manual_seed(seed)", "def set_seed():\n np.random.seed(1423)", "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def temp_seed(cntxt_seed):\n state = np.random.get_state()\n np.random.seed(cntxt_seed)\n try:\n yield\n finally:\n np.random.set_state(state)", "def random_test(self, 
source):\r\n ret = 1\r\n for seed in range(1, 40):\r\n if source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x)**2+10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}) != \\\r\n source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x) ** 2 + 10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}):\r\n ret = 0\r\n if ret == 0:\r\n if self.verbosity > 0:\r\n print(\"ERROR: Random seed non functional, results cannot be replicated.\")\r\n return 0\r\n else:\r\n if self.verbosity > 1:\r\n print(\"Random seed functional, results replicable if a seed is used.\")\r\n return 1", "def set_torch_seed(seed):\n rng = np.random.RandomState(seed=seed)\n torch_seed = rng.randint(0, 999999)\n torch.manual_seed(seed=torch_seed)\n\n return rng", "def set_seed(self, seed: int):\n self.__sim.seed(seed)", "def generate_random_solution(self):\n # \"Generate random solution\"\n Individual.COUNT += 1\n if INITIALPOP == 'random':\n # Choose randomly a file in the original dataset.\n seed = random.choice(starting_seeds)\n Individual.SEEDS.add(seed)\n elif INITIALPOP == 'seeded':\n # Choose sequentially the inputs from the seed list.\n # NOTE: number of seeds should be no less than the initial population\n assert (len(starting_seeds) == POPSIZE)\n seed = starting_seeds[Individual.COUNT - 1]\n Individual.SEEDS.add(seed)\n\n digit1 = generate_digit(seed)\n digit1.is_original = True\n individual = Individual(digit1, seed)\n individual.seed = seed\n\n return individual", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def random_seed(i): # -> None:\n ...", "def seed_all(seed):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)", "def seed_all(seed):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)", "def _local_seeding(self, space: Space, function: Function) -> None:\n\n new_agents = []\n for i, agent in enumerate(space.agents):\n if self.age[i] == 0:\n for _ in range(self.LSC):\n child = copy.deepcopy(agent)\n\n j = r.generate_integer_random_number(high=child.n_variables)\n child.position[j] += r.generate_uniform_random_number(\n child.lb[j], child.ub[j]\n )\n child.clip_by_bound()\n\n child.fit = function(child.position)\n\n new_agents.append(child)\n\n self.age = [age + 1 for age in self.age]\n\n space.agents += new_agents\n\n self.age += [0] * len(new_agents)", "def init_seed(seed=None):\n if seed is None:\n seed = int(time.time())\n\n LOGGER.info(\"Using seed=%d\", seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def seed(self, seed=None):\n self._np_random, seed = seeding.np_random(seed)\n return [seed]", "def simulate(self, seed=None):\n\n # reset lists\n self.extant_p = []\n self.not_extant_p = []\n self.not_yet_sampled_p = []\n self.extant_h = []\n self.not_extant_h = []\n self.not_yet_sampled_h = []\n\n if seed:\n random.seed(seed) # for debugging and unit testing\n\n # populates host_nodes list\n self.get_node_heights() # extant_h initialized with zero-height host tips\n next_height, next_node = self.host_nodes.pop(0)\n\n # populate host tips with pathogen lineages\n self.initialize_pathogen_tree()\n\n this_height = 0 # initialize at the end of the tree (most recent tip)\n while True:\n if self.verbose:\n # report on state\n print self\n\n # update pathogen locations (hosts) and record pairs\n self.get_pairs()\n\n # total rate 
of pathogen coalescence or host-switch events within this interval\n lambd_mig = len(self.extant_p) * self.m_rate if len(self.extant_h) > 1 else 0.\n lambd_tot = len(self.choices) * self.c_rate + lambd_mig\n\n # draw waiting time\n wait = random.expovariate(lambd_tot) if lambd_tot > 0 else None\n if wait is None or wait > (next_height - this_height):\n # waiting time exceeds next host node height\n if next_node.is_leaf():\n if self.verbose:\n print ('sampled host tip', next_node.name)\n self.extant_h.append(next_node)\n self.not_yet_sampled_h.remove(next_node)\n\n # activate pathogen lineages in this host tip\n nodes_to_move = filter(lambda x: x.host==next_node, self.not_yet_sampled_p)\n self.extant_p.extend(nodes_to_move)\n for node in nodes_to_move:\n self.not_yet_sampled_p.remove(node)\n else:\n if self.verbose:\n print 'coalesce hosts at', next_node\n self.coalesce_hosts(next_node)\n\n this_height = next_height # move to top of current interval\n if len(self.host_nodes) == 0:\n break\n next_height, next_node = self.host_nodes.pop(0) # retrieve next host node\n continue\n\n # ELSE there is either a host-switch or pathogen coalescence event\n if random.uniform(0, 1) < lambd_mig/lambd_tot:\n if self.verbose:\n print 'switch hosts'\n self.switch_hosts(t0=this_height+wait) # host-switch event\n else:\n # coalescence of pathogen lineages within a host\n if self.verbose:\n print 'coalesce within hosts'\n c_paths = random.choice(self.choices.keys()) # randomly select a pair that can coalesce\n self.coalesce_paths(c_paths, t0=this_height+wait)\n\n this_height += wait # move up the tree\n\n # coalesce remaining pathogen lineages in the last host\n self.get_pairs()\n self.coalesce_within_root(next_node)\n\n return self.extant_p[0].get_tree_root() # should correspond to root of pathogen tree", "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def set_seed(seeds, simulation_number):\n\n if seeds:\n seed = seeds[simulation_number]\n else:\n seed = None\n np.random.seed(seed)" ]
[ "0.6827026", "0.6489305", "0.6472902", "0.6374276", "0.6374276", "0.6366158", "0.63590467", "0.6344138", "0.6339408", "0.62854254", "0.62784153", "0.62761444", "0.6232792", "0.61779606", "0.6149257", "0.61197567", "0.60963154", "0.6078606", "0.6052686", "0.60478973", "0.6045851", "0.6037681", "0.60271585", "0.60271585", "0.6014858", "0.60129017", "0.60066915", "0.5990238", "0.598816", "0.5984502", "0.5969013", "0.5958218", "0.59437895", "0.5942321", "0.5935733", "0.5931703", "0.5931387", "0.59185874", "0.5876218", "0.5858624", "0.5858443", "0.58390796", "0.583299", "0.58287066", "0.5818603", "0.58091265", "0.5796669", "0.57927835", "0.57924956", "0.57919514", "0.57876956", "0.57858336", "0.5777906", "0.5773883", "0.57726514", "0.5766787", "0.5764316", "0.5760833", "0.5758231", "0.5753426", "0.5753426", "0.5753371", "0.57459444", "0.57441795", "0.57414913", "0.5738559", "0.57354784", "0.5728844", "0.5724547", "0.5721225", "0.5716509", "0.5707893", "0.57073265", "0.57040125", "0.5699055", "0.56857264", "0.56852233", "0.56815016", "0.56670535", "0.56622905", "0.5657738", "0.5657017", "0.5656652", "0.5652644", "0.5648113", "0.5646829", "0.5645996", "0.56393796", "0.56386137", "0.5631261", "0.56178826", "0.56146234", "0.5610886", "0.5610886", "0.56047714", "0.55989003", "0.5596829", "0.55815697", "0.5553695", "0.55505157" ]
0.59798753
30
Draws a random float number from a uniform distribution given by U[lower, upper].
def uniform(lower, upper):
    return lower + random.random() * (upper - lower)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rand_float(self, low, high):\n\n return self.np_random.uniform(low, high)", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def random_float(low: float, high: float):\n seed = time.time()\n random.seed(seed)\n return random.uniform(low, high)", "def uniform(a: float, b: float) -> float:\n ...", "def random_float():\n return (random() - 0.5) * 2", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def uniform_random_value(l_boundary: float, r_boundary: float) -> float:\n return uniform(l_boundary, r_boundary)", "def rand_uniform(a, b):\n\n\treturn a + lcg.draw_rand_number() * (b - a)", "def UniformRV(low, high):\n return RV(ss.randint(low, high))", "def test_uniform(self):\r\n\r\n s = np.random.uniform(-1.35, 0.5, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def uniform(low, high, size, dtype=np.float32):\n rng = np.random.default_rng(0)\n out = (high - low) * rng.random(size, dtype=dtype) + low\n return out", "def random() -> float:\n ...", "def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n low = tensor.as_tensor_variable(low)\r\n high = tensor.as_tensor_variable(high)\r\n if dtype is None:\r\n dtype = tensor.scal.upcast(theano.config.floatX, low.dtype, high.dtype)\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)\r\n op = RandomFunction('uniform',\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast))\r\n return op(random_state, size, low, high)", "def uniform(\n self, low: float = 0, high: float = 1, size: Optional[Iterable[int]] = None\n ):\n _seed = self._seed() if callable(self._seed) else self._seed\n return _uniform(\n low=low,\n high=high,\n size=size,\n seed=_seed,\n device=self._device,\n handle=self._handle,\n )", "def uniform_dist(low, high):\n return sp_uniform(low, high - low)", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def random_floats(low, high=None, size=None):\n if high is None:\n high = low\n low = 0\n return low + (np.random.random(size) * (high - low))", "def random():\r\n return R.NextDouble()", "def fix_rand_value(lo_bound: float, up_bound: float) -> float:\n # In this patch test function for determinism, just return lower bound\n nonlocal _i, _vals_sequence\n v_return = _vals_sequence[_i]\n _i = (_i + 1) % len(_vals_sequence)\n return v_return", "def draw_uniform(z, generator,device='cpu'):\n\n if '64' in str(torch.get_default_dtype()):\n if 'cuda' == device:\n h = torch.cuda.DoubleTensor(z, 1).uniform_(generator=generator)\n elif 'cpu' == device:\n h = torch.DoubleTensor(z, 1).uniform_(generator=generator)\n else:\n if 'cuda' == device:\n h = torch.cuda.FloatTensor(z, 1).uniform_(generator=generator)\n elif 'cpu' == device:\n h = torch.FloatTensor(z, 1).uniform_(generator=generator)\n return h", "def randomize(lower, upper):\n return lower + (random.random() * (upper - lower))", "def 
sample_uniform():\n global samples_uniform, isample_uniform\n\n # sample of U(0, 1)\n u = samples_uniform[isample_uniform]\n\n # moving to next index of samples global array\n isample_uniform += 1\n if isample_uniform >= len(samples_uniform):\n # exhausted all samples -> re-drawing samples from U(0, 1)\n samples_uniform = np.random.uniform(size=SIZE_SAMPLES_UNIFORM)\n isample_uniform = 0\n\n return u", "def rand_followers(scale_factor=100):\n return round(-scale_factor * log(rand.uniform()))", "def preturbScalar(value, vrange, distr=\"uniform\"):\n\tif distr == \"uniform\":\n\t\tscale = 1.0 - vrange + 2 * vrange * random.random() \n\telif distr == \"normal\":\n\t\tscale = 1.0 + np.random.normal(0, vrange)\n\telse:\n\t\texisWithMsg(\"unknown noise distr \" + distr)\n\treturn value * scale", "def sample(self):\n u = np.asarray(np.random.uniform())\n return self.invert(u)", "def generate_uniform_random_number(low=0.0, high=1.0, size=1):\n\n uniform_array = np.random.uniform(low, high, size)\n\n return uniform_array", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def rint(lo, hi):\n return round(0.5 + rand(lo, hi))", "def random_from_bound(bound):\n if (isinstance(bound, tuple)):\n val = np.random.uniform(low = bound[0], high = bound[1])\n else:\n val = 0.0\n return val", "def get_value(self) -> float:\n return random.gauss(self._mu, self._sigma)", "def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)", "def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)", "def randu(*shape):\n # ATTENTION: whether you use randu or randn can make a difference.\n return 2*rand(*shape)-1", "def random_normal():\r\n return inverse_normal_cdf(random.random())", "async def randomFloat(self, ctx: commands.Context, start: float = None, stop: float = None):\n if start is None:\n start = 0.0\n stop = 1.0\n elif stop is None:\n stop = start\n start = 0.0\n if start>stop:\n temp = start\n start = stop\n stop = temp\n await ctx.reply(str(random.uniform(start, stop)), mention_author=False)", "def glorot_uniform(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='uniform', seed=seed)", "def uniform(self, size=None, low=0.0, high=1.0, ndim=None, dtype=None):\r\n return self.gen(uniform, size, low, high, ndim=ndim, dtype=dtype)", "def unforeseen():\r\n return random.gauss(300., 100.)", "def _sample_gumbel(self, shape, eps=1e-20):\r\n U = tf.random_uniform(shape, minval=0, maxval=1)\r\n return -tf.log(-tf.log(U + eps) + eps)", "def uniform(\n shape: Iterable[int],\n low: float = 0,\n high: float = 1,\n comp_node: Optional[CompNode] = None,\n comp_graph: Optional[CompGraph] = None,\n) -> Tensor:\n assert low < high, \"Uniform is not defined when low >= high\"\n\n comp_node, comp_graph = _use_default_if_none(comp_node, comp_graph)\n seed = _random_seed_generator().__next__()\n return low + (high - low) * mgb.opr.uniform_rng(\n shape, seed=seed, comp_node=comp_node, comp_graph=comp_graph\n )", "def loguniform_dist(low, high, base=10):\n return ExponentiateDistribution(sp_uniform(low, high - low), base=base)", "def he_uniform(seed=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n return lambda shape, dtype, batch_ndims=0: _initialize( # 
pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=2., mode='fan_in', distribution='uniform', seed=seed)", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def random_normal():\n return inverse_normal_cdf(random.random())", "def generate_number(lbound=1, ubound=100, mean=None, std=None):\n x = np.arange(lbound, ubound + 1)\n if mean and std:\n prob = stats.norm.pdf(x, loc=mean, scale=std)\n prob = prob / prob.sum() #normalize the probabilities so they sum up to 1\n else:\n prob = np.repeat(1 / len(x), len(x))\n num = np.random.choice(x, p=prob)\n return num", "def sample_float(self, start, end, step):\n self.minimum = start\n self.maximum = end\n return random.sample(list(np.arange(start, end, step)), k=self._sample_size)", "def sample_gauss(mean, sd, lower=0, upper=1):\n r = random.gauss(mean, sd)\n while not (lower < r < upper):\n r = random.gauss(mean, sd)\n return r", "def uniform(self, key, min_value=0., max_value=1.):\n return min_value + self._random(key) * (max_value - min_value)", "def normal(mean, std):\n\n return random.gauss(mean, std)", "def randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "def quasi_rand(values, feature, parent):\r\n seed = values[0]\r\n base = values[1]\r\n min = values[2]\r\n max = values[3]\r\n \r\n return math.floor(halton(seed, base) * (max - min + 1) + min)", "def random_brightness(x, brightness_range):\n if len(brightness_range) != 2:\n raise ValueError('`brightness_range should be tuple or list of two floats. '\n 'Received arg: ', brightness_range)\n\n u = np.random.uniform(brightness_range[0], brightness_range[1])\n x = u * x\n\n return x", "def rvsWithinCDFbounds(self,lowerBound,upperBound):\n randResult = self._distribution.inverseCdf(float(random(1))*(upperBound-lowerBound)+lowerBound)\n return randResult", "def glorot(w):\n n_in, n_out = w.size()\n b = math.sqrt(6) / math.sqrt(n_in + n_out)\n return w.uniform_(-b, b)", "def perturb_value(tuple_input):\n id = tuple_input[0]\n value = tuple_input[1]\n from numpy.random import uniform\n perturb_percent = uniform(low=1-perturb_range, high=1+perturb_range, size=(1))[0]\n return (id, float(value*perturb_percent))", "def float(self, max_=None):\n max_ = self.max_float if max_ is None else max_\n return max_ * (self.rng.random() - 0.5)", "def rbernoulli(p):\n # The idea is to sample a random real r in the unit interval, one\n # bit (i.e. 
binary decimal place) at a time, until we are sure\n # that either r < p or r > p.\n hi = 1.0\n lo = 0.0\n d = -1\n while lo < p < hi:\n if random.getrandbits(1):\n lo = (hi + lo)/2\n else:\n hi = (hi + lo)/2\n print lo,hi\n if p > hi:\n return 1\n else:\n return 0", "def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters\r\n post_r, out = uniform(rng_R, (4,), -2.0, 2.0)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.uniform(-2.0, 2.0, size=(4,))\r\n numpy_val1 = numpy_rng.uniform(-2.0, 2.0, size=(4,))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.allclose(val0, numpy_val0))\r\n self.assertTrue(numpy.allclose(val1, numpy_val1))", "def random_randint(lower_bound, upper_bound):\r\n return random_randrange(lower_bound, upper_bound+1)", "def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):\n minval_dtype = F.dtype(minval)\n maxval_dtype = F.dtype(maxval)\n const_utils.check_valid_type(dtype, [mstype.int32, mstype.float32], 'uniform')\n const_utils.check_tensors_dtype_same(minval_dtype, dtype, \"uniform\")\n const_utils.check_tensors_dtype_same(maxval_dtype, dtype, \"uniform\")\n seed1, seed2 = _get_seed(seed, \"uniform\")\n if const_utils.is_same_type(dtype, mstype.int32):\n random_uniform = P.UniformInt(seed1, seed2)\n value = random_uniform(shape, minval, maxval)\n else:\n uniform_real = P.UniformReal(seed1, seed2)\n random_uniform = uniform_real(shape)\n value = random_uniform * (maxval - minval) + minval\n return value", "def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)", "def uniform_latitude(u):\n return np.pi / 2 - np.arccos(np.sqrt(1 - u))", "def sample_gumbel(shape, eps=1e-10):\n U = torch.rand(shape).float()\n return - torch.log(eps - torch.log(U + eps))", "def rand_rate_uni( N, tmin, tmax, **kwargs):\n return np.random.uniform( tmin, tmax, size = N)", "def _u_naught_simple(self):\n # Random is better to give different multipliers in the subgradient phase\n return np.random.rand(self.mrows)*1.", "def log_uniform_sample(sample_range):\n log_min = np.log10(sample_range[0])\n log_max = np.log10(sample_range[1])\n u = np.random.rand()*(log_max-log_min) + log_min\n return np.power(10.0,u)", "def samplepoint(x,u):\n return point(x)", "def sample_gumbel(shape, eps=1e-20):\n U = tf.random_uniform(shape,minval=0,maxval=1)\n return -tf.log(-tf.log(U + eps) + eps)", "def sample_gumbel(shape, eps=1e-20):\n U = tf.random_uniform(shape,minval=0,maxval=1)\n return -tf.log(-tf.log(U + eps) + eps)", "def generate():\n global data\n data = []\n # Generate a random data set\n for _ in range(usr_size.get()):\n data.append(random.randrange(usr_min.get(), usr_max.get()+1))\n display_data(data, ['red' for x in range(len(data))])", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def random(self, size, mu, sigma):\n U = uniform.rvs(size=size)\n return self.qf(U, mu, sigma)", "def generate(random, lower, upper, count=1):\n if count > 1:\n points = []\n\n for x in range(lower.x, upper.x):\n for y in range(lower.y, upper.y):\n points.append(Point(x, y)) # 
REFACTOR: Not very efficient\n\n return random.sample(points, count)\n else:\n return Point(random.randrange(lower.x, upper.x), random.randrange(lower.y, upper.y))", "def sample_gumbel(shape, eps=1e-20):\n U = tf.random_uniform(shape, minval=0, maxval=1)\n return -tf.log(-tf.log(U + eps) + eps)", "def draw(self, *size):\n max_value = self.alias.size(0)\n\n kk = self.alias.new(*size).random_(0, max_value).long().view(-1)\n prob = self.prob[kk]\n alias = self.alias[kk]\n # b is whether a random number is greater than q\n b = torch.bernoulli(prob).long()\n oq = kk.mul(b)\n oj = alias.mul(1 - b)\n\n return (oq + oj).view(size)", "def rand_sample_gauss():\n mean = float(NUM_UNIQUE_VALUES + 1) / 2\n while True:\n r = random.normalvariate(mean, DIST_PARAM)\n value = int(round(r))\n # Rejection sampling to cut off Gaussian to within [1, NUM_UNIQUE_VALUES]\n if 1 <= value <= NUM_UNIQUE_VALUES:\n break\n\n return value # true client value", "def random_num(range_start,range_end):\r\n return random.randint(range_start,range_end)", "def draw(function):\n def f(**kwargs):\n rand = random.random()\n x = function(**kwargs)\n return int(rand < x)\n return f", "def _generate_random_vector(size):\n return np.random.uniform(-0.1, 0.1, size)", "def convert_random_uniform(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n low = float(attrs.get(\"low\", 0))\n high = float(attrs.get(\"high\", 1.0))\n shape = convert_string_to_list(attrs.get('shape', '[]'))\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]\n\n node = onnx.helper.make_node(\n 'RandomUniform',\n input_nodes,\n [name],\n low=low,\n high=high,\n dtype=dtype,\n shape=shape,\n name=name\n )\n return [node]", "def sample_gumbel(shape, eps=1e-20):\n U = tf.random_uniform(shape, minval=0, maxval=1)\n return -tf.log(-tf.log(U + eps) + eps)", "def rand_val(val_range):\r\n if isinstance(val_range, (list, tuple, np.ndarray)):\r\n return np.random.uniform(val_range[0], val_range[-1])\r\n # Assume val_range is a number\r\n return val_range", "def generate_uv(number_pixels, center=0.0 * apu.arcsec, pixel_size=1.0 * apu.arcsec):\n x = (np.arange(number_pixels) - number_pixels / 2 + 0.5) * (1 / (pixel_size * number_pixels))\n if center.value != 0.0:\n x += 1 / center\n return x", "def float_rand(a=0, b=1, size=None):\n \n return np.random.random_sample(size) * (b - a) + a", "def draw_logarithmic(low, high, size=1):\n\n if np.any(low <= 0.0) or np.any(high <= 0.0):\n raise ValueError('draw_logarithmic expects positive arguments')\n\n llow = np.log(low)\n lhigh = np.log(high)\n\n return np.exp(nr.uniform(low=llow, high=lhigh, size=size))", "def rand(self, lo, hi):\n lo, hi = lo or 0, hi or 1\n\n self.seed = (16807 * self.seed) % 2147483647\n return lo + (hi - lo) * self.seed / 2147483647", "def rand(self, lo, hi):\n lo, hi = lo or 0, hi or 1\n\n self.seed = (16807 * self.seed) % 2147483647\n return lo + (hi - lo) * self.seed / 2147483647", "def sample_from_unit_ball(rng, dim):\n vec = rng.randn(dim)\n return vec / np.sqrt(np.sum(vec**2))", "def _random_shoot_bernstein_ ( fun ) :\n xmn = fun.xmin ()\n xmx = fun.xmax ()\n ymx = max ( fun.bernstein().pars() )\n i = 0 \n while True : \n x = _uniform_ ( xmn , xmx ) \n y = _uniform_ ( 0 , ymx )\n v = fun ( x )\n if v >= y : return x", "def sample_gumbel(shape, eps=1e-10, out=None):\r\n U = out.resize_(shape).uniform_() if out is not None else torch.rand(shape)\r\n return - torch.log(eps - torch.log(U + eps))", "def rnd_choice(start, 
end, step, output_type=float):\n nums = np.append(np.arange(start, end, step), end)\n return output_type(np.random.choice(nums))", "def preturbScalarAbs(value, vrange):\n\tdelta = - vrange + 2.0 * vrange * random.random() \n\treturn value + delta", "def random_gumbel(shape,\n mu=0.0,\n beta=1.0,\n dtype=conf.dtype,\n seed=None,\n name=None):\n u = tf.random_uniform(\n shape=shape,\n minval=0,\n maxval=1,\n dtype=dtype,\n seed=seed,\n name=name\n )\n g = -tf.log(-tf.log(u))\n g = mu + g * beta\n return g", "def test_uniform(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.uniform((2,2), -1, 1))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.uniform(-1, 1, size=(2,2))\r\n numpy_val1 = rng.uniform(-1, 1, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def gauss_sample(num, stdev):\n sample = np.random.normal(0, stdev, num)\n sample = sample.round().astype(int)\n return sample" ]
[ "0.7035064", "0.67842853", "0.66727394", "0.65749025", "0.650535", "0.64895654", "0.6346513", "0.63048273", "0.62389475", "0.61971956", "0.6175181", "0.6122292", "0.61146706", "0.61146706", "0.6104602", "0.6089752", "0.60430914", "0.6024726", "0.6020649", "0.597315", "0.5962495", "0.59539646", "0.5949301", "0.59025496", "0.58703953", "0.5815848", "0.5788794", "0.57874167", "0.5783046", "0.5759214", "0.5741103", "0.57271", "0.5721169", "0.57194805", "0.57069767", "0.56978595", "0.56978595", "0.5687614", "0.56797135", "0.56668854", "0.5660805", "0.56546605", "0.56437236", "0.56078464", "0.5606115", "0.5583523", "0.55735505", "0.5560146", "0.5554552", "0.5546575", "0.5526201", "0.55202514", "0.5499334", "0.5489662", "0.54884183", "0.5468249", "0.5418397", "0.5418273", "0.5410358", "0.54102546", "0.5409959", "0.5404164", "0.5400844", "0.5395446", "0.53940773", "0.53910637", "0.53875536", "0.53845555", "0.53489673", "0.534596", "0.5343889", "0.53306264", "0.53113973", "0.53113973", "0.5301421", "0.528817", "0.52832973", "0.5282727", "0.527804", "0.52651775", "0.5261278", "0.52565247", "0.52506953", "0.52478445", "0.5246572", "0.52459997", "0.5242747", "0.5224192", "0.52103555", "0.5196835", "0.5175762", "0.5175762", "0.51730394", "0.517059", "0.516816", "0.5162691", "0.5152079", "0.51451147", "0.51430964", "0.51407856" ]
0.73602164
0
Draws a random float number from a normal distribution
def normal(mean, std): return random.gauss(mean, std)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_normal():\r\n return inverse_normal_cdf(random.random())", "def random_normal():\n return inverse_normal_cdf(random.random())", "def random_float():\n return (random() - 0.5) * 2", "def draw_normal(self):\n means, scale = self.get_means_and_scales()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T", "def draw_normal_initial(self):\n means, scale = self.get_means_and_scales_from_q()\n return np.random.normal(means,scale,size=[self.sims,means.shape[0]]).T", "def random():\r\n return R.NextDouble()", "def random() -> float:\n ...", "def _rand_float(self, low, high):\n\n return self.np_random.uniform(low, high)", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def _gen_normal(self, count, **kwargs):\n normal = scipy.stats.norm(loc=kwargs['mean'], scale=kwargs['stdev'])\n rvs = normal.rvs(count)\n return rvs", "def get_standard_normal_distribution():\n return np.random.normal(0, 1)", "def test_uniform(self):\r\n\r\n s = np.random.uniform(-1.35, 0.5, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def cdf_normal(x: float, mean: float = 0, std: float = 1):\n return (1 / 2) * (1 + erf((x - mean) / (std * sqrt(2))))", "def glorot_normal(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='truncated_normal', seed=seed)", "def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)", "def normal(self, position):\n return self._normal", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def stdProbabilityNorm(self):\n return 0.5", "def normal_sample(mu, sigma):\n return mu + sigma * torch.randn_like(sigma)", "def test_randn_normal_distribution():\n\n seed = 28041995\n pts = 10**5\n alpha = 0.05\n expected_mean = 0.0\n expected_var = 1.0\n\n dpnp.random.seed(seed)\n res = dpnp.asnumpy(dpnp.random.randn(pts))\n var = numpy.var(res)\n mean = numpy.mean(res)\n assert math.isclose(var, expected_var, abs_tol=0.03)\n assert math.isclose(mean, expected_mean, abs_tol=0.03)", "def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01", "def get_value(self) -> float:\n return random.gauss(self._mu, self._sigma)", "def density_normal_dist(x):\n return ONE_OVER_SQRT_OF_TWO_PI * exp(-0.5 * x * x)", "def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)", "def GetNormal(self):\n ...", "def normalVect(self, n=2):\n L = len(self.vertices)\n normals = []\n while len(normals) < n:\n j = randrange(L)\n v0 = vector(self.vertices[j].coords())\n v1 = vector(self.vertices[int(j + L / 3) % L].coords())\n v2 = vector(self.vertices[int(j + 2 * L / 3) % L].coords())\n try:\n normals.append(((v1 - v0) * (v2 - v0)).normalize())\n except ValueError:\n pass\n return (1 / len(normals)) * sum(normals, vector(0, 0, 0))", "def normal_distr(x, mu, sigma, s=1):\n \n return s * 1/(sigma * torch.sqrt(torch.tensor(2 * np.pi))) * torch.exp((-1/2) * ((x - mu) / sigma) ** 2)", "def draw_powerlaw(alpha, rng, N=1):\n if alpha == -1:\n alpha = -1.0000001\n # Normalization 
factor\n x0, x1 = rng\n C = (alpha + 1) / (x1**(alpha + 1) - x0**(alpha + 1))\n \n if N==1:\n u = np.random.random()\n else:\n u = np.random.random(N)\n x = ((u * (alpha + 1)) / C + x0**(alpha + 1))**(1./(alpha + 1))\n\n return x", "def calcRandNorm(mean,std,seed,var):\n\n varR = (seed*(1.0+var)-seed*(1.0-var))\n val = np.random.random(size=np.size(varR))*varR+seed\n np.clip(val,0.01,0.99,out=val)\n val = sps.norm.ppf(val,loc=mean,scale=std)\n return val", "def draw(self, nsamples):\n \n if self.dist == 'normal':\n mean = self.mean\n sd = self.sd\n self.sample = sd * np.random.randn(nsamples) + mean\n \n elif self.dist == 'poisson':\n lam = self.lam\n self.sample = np.random.poisson(lam, size=nsamples)\n \n elif self.dist == 'binomial':\n n = self.n\n p = self.p\n self.sample = np.random.binomial(n, p, size=nsamples)\n \n else:\n print('dist must be normal, poisson or binomial')", "def MakeNormalPlot(ys, root=None, line_options={}, **options):\n # TODO: when n is small, generate a larger sample and desample\n n = len(ys)\n xs = [random.normalvariate(0.0, 1.0) for i in range(n)]\n #xs=EstimateRankits(n)\n pyplot.clf()\n pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)\n \n myplot.Show(xlabel = 'Standard normal values',\n legend=False,\n **options)", "def normal(target, seeds, scale, loc):\n import scipy.stats as spts\n\n seeds = target[seeds]\n value = spts.norm.ppf(q=seeds, scale=scale, loc=loc)\n return value", "async def randomFloat(self, ctx: commands.Context, start: float = None, stop: float = None):\n if start is None:\n start = 0.0\n stop = 1.0\n elif stop is None:\n stop = start\n start = 0.0\n if start>stop:\n temp = start\n start = stop\n stop = temp\n await ctx.reply(str(random.uniform(start, stop)), mention_author=False)", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def normalDist(x, mean, var):\n\n if (var != 0):\n return -(x - mean)**2/(2*var) - 0.5*np.log(2*np.pi*var)\n else:\n return 0", "def normpdf(x, mean, sd):\n var = float(sd)**2\n denom = (2*math.pi*var)**.5\n num = math.exp(-(float(x)-float(mean))**2/(2*var))\n return num/denom", "def normal(x, mean, dispersion):\n return np.exp(\n -1 * squared_norm(x - mean) / (2 * dispersion)\n ) / (\n np.sqrt(2 * np.pi * dispersion)\n )", "def uniform_random_value(l_boundary: float, r_boundary: float) -> float:\n return uniform(l_boundary, r_boundary)", "def perturb(x):\n gf_crazyness = 0.2\n try:\n # x is a vector\n return x+np.random.normal(scale=gf_crazyness, size=len(x))\n except:\n # x is a scalar\n return x+np.random.normal(scale=gf_crazyness)", "def approx_standard_normal_cdf(x):\n return 0.5 * (1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3))))", "def rand_uniform(a, b):\n\n\treturn a + lcg.draw_rand_number() * (b - a)", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.normal((2,2), -1, 2))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert 
numpy.allclose(fn_val1, numpy_val1)", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def float(self, max_=None):\n max_ = self.max_float if max_ is None else max_\n return max_ * (self.rng.random() - 0.5)", "def random_with_deviation(self, number):\n min_bound = round(number - number / (100 / self.DEVIATION_PERCENTAGE))\n max_bound = round(number + number / (100 / self.DEVIATION_PERCENTAGE))\n\n return randint(min_bound, max_bound)", "def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None, dtype=None):\r\n avg = tensor.as_tensor_variable(avg)\r\n std = tensor.as_tensor_variable(std)\r\n if dtype is None:\r\n dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype, std.dtype)\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)\r\n op = RandomFunction('normal',\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast))\r\n return op(random_state, size, avg, std)", "def random_float(low: float, high: float):\n seed = time.time()\n random.seed(seed)\n return random.uniform(low, high)", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b / np.sum(b, 1)[:, None]", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def random_distribution():\n b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])\n return b/np.sum(b, 1)[:,None]", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def rand(self, mean, count=1, min=0, max=numpy.inf):\n a = (min-mean)/numpy.sqrt(mean)\n b = (max - mean)/numpy.sqrt(mean)\n return numpy.int64(numpy.round(stats.truncnorm.rvs(a, b, loc=mean, scale=numpy.sqrt(mean), size=count)))", "def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = self.means[0]\n dispersion = self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')", "def he_normal(seed=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def normal(self):\n M = numpy.sqrt(self.magnitude())\n self.pure = self.pure / M\n self.real = self.real / M", "def convert_random_normal(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n # Converting to float32\n mean = float(attrs.get(\"loc\", 0))\n scale = float(attrs.get(\"scale\", 1.0))\n shape = convert_string_to_list(attrs.get('shape', '[]'))\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]\n\n node = onnx.helper.make_node(\n 'RandomNormal',\n input_nodes,\n [name],\n mean=mean,\n scale=scale,\n dtype=dtype,\n shape=shape,\n name=name\n )\n return [node]", "def blauNormal(blau, N):\n nom 
= blau - (1/N)\n den = 1 - (1/N)\n\n return nom / den", "def safe_rand(self):\n rand_n = np.random.rand()\n if rand_n == float(1):\n rand_n -= 1e-10\n return rand_n", "def _normal_distribution_cdf(x, stddev):\n return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))", "def get_normal(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n u = np.array([1, 0])\n return np.dot(r, u)", "def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)", "def uniform(stdev, size):\n return numpy.random.uniform(\n low=-stdev * numpy.sqrt(3),\n high=stdev * numpy.sqrt(3),\n size=size\n ).astype(theano.config.floatX)", "def random_importance_function():\n return random()", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n # Use non-default parameters\r\n post_r, out = normal(rng_R, (2, 3), 4.0, 2.0)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0 = f()\r\n val1 = f()\r\n numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\r\n numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.allclose(val0, numpy_val0))\r\n self.assertTrue(numpy.allclose(val1, numpy_val1))", "def _get_visual_position(self, point: int) -> float:\n return point / self._param[\"n_v\"] + np.random.uniform() / \\\n self._param[\"n_v\"]", "def normal_lcdf(mu, sigma, x):\n z = (x - mu) / sigma\n return pt.switch(\n pt.lt(z, -1.0),\n pt.log(pt.erfcx(-z / pt.sqrt(2.0)) / 2.0) - pt.sqr(z) / 2.0,\n pt.log1p(-pt.erfc(z / pt.sqrt(2.0)) / 2.0),\n )", "def normal_pdf(x: torch.Tensor) -> torch.Tensor:\n return torch.exp(-(x ** 2 / 2)) / np.sqrt(2 * np.pi)", "def get_random_vector(self, mean, standard_deviaton):\n result = []\n for i in xrange(len(mean)):\n result.append(np.random.normal(mean[i], standard_deviaton[i]))\n return result", "def x_bar_Normal_distribution(a, b, n):\r\n mean = normal_mean(a, b)\r\n deviation = normal_deviation(a, b)\r\n normal_x_bar_deviation = deviation / math.sqrt(n)\r\n print(\"The standard deviation of the sample of means from the normal distribution ( [n] samples ) is: \", normal_x_bar_deviation)\r\n return normal_x_bar_deviation", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.normal((2,2), -1, 2))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def generate_number(lbound=1, ubound=100, mean=None, std=None):\n x = np.arange(lbound, ubound + 1)\n if mean and std:\n prob = stats.norm.pdf(x, loc=mean, scale=std)\n prob = prob / prob.sum() #normalize the probabilities so they sum up to 1\n else:\n prob = np.repeat(1 / len(x), len(x))\n num = np.random.choice(x, 
p=prob)\n return num", "def unforeseen():\r\n return random.gauss(300., 100.)", "def normal(self, point):\n return self._normal.dup()", "def random_uniform(self, n_samples=1, max_norm=1):\n point = ((np.random.rand(n_samples, self.dimension) - .5)\n * max_norm)\n point = self.intrinsic_to_extrinsic_coords(point)\n assert np.all(self.belongs(point))\n\n assert point.ndim == 2\n return point", "def random(N, D, rng):\n samples = rng.randn(N, D)\n norm = np.sqrt(np.sum(samples*samples, axis=1))\n return samples/norm[:,None]", "def sample_one(self):\n # x = self.mean + self.sigma * np.random.normal()\n x = self.dist.sample(1)\n return x", "def h_normal_gpytorch(s):\n return torch.log(s * (2 * np.e * np.pi) ** 0.5)", "def uniform(a: float, b: float) -> float:\n ...", "def rvs(self):\n return float(self.interp(random.rand()))", "def random_glove_generator(emb_mean, emb_stddev):\n x = np.random.normal(loc=0.0, scale=1.0, size=len(emb_mean))\n x_rand = np.multiply(x, emb_stddev) + emb_mean\n return x_rand", "def __call__(self, shape):\n return np.random.normal(loc=self.mean, scale=self.stddev, size=shape)", "def gauss_sample(num, stdev):\n sample = np.random.normal(0, stdev, num)\n sample = sample.round().astype(int)\n return sample", "def normal_lower_bound(probability: float, mu: float = 0, sigma: float = 1) -> float:\n return inverse_normal_cdf(1 - probability, mu, sigma)", "def stdProbabilityNorm(self,std=False):\n sv = str(scipy.__version__).split('.')\n if int(sv[0])==0 and int(sv[1])==15:\n self.raiseAWarning('SciPy 0.15 detected! In this version, the normalization factor for normal distributions was modified.')\n self.raiseAWarning('Using modified value...')\n return 1.0/np.sqrt(np.pi/2.)\n else:\n return 1.0/np.sqrt(2.*np.pi)", "def uniform(lower, upper):\n\n return lower + random.random() * (upper - lower)", "def sample_from(self):\n return numpy.random.normal(self.mu, math.sqrt(self.sigma))", "def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) > 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)", "def normal(vx,vy,n):\n if vx==0:\n if vy==0: \n return (0,0)\n else:\n return (0,n)\n elif vy==0:\n return (n,0)\n else:\n return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))", "def schlichtkrull_normal_(tensor, shape, gain=1.):\n std = schlichtkrull_std(shape, gain)\n with torch.no_grad():\n return tensor.normal_(0.0, std)", "def rnd(n: float, n_places: int) -> float:\n mult = math.pow(10, n_places or 3)\n return math.floor(n * mult + 0.5) / mult", "def rnd(n: float, n_places: int) -> float:\n mult = math.pow(10, n_places or 3)\n return math.floor(n * mult + 0.5) / mult", "def random_normal_initializer(mean=0.0, stddev=1.0, seed=None,\n dtype=dtypes.float32):\n def _initializer(shape, dtype=_assert_float_dtype(dtype)):\n return random_ops.random_normal(shape, mean, stddev, dtype, seed=seed)\n return _initializer", "def gaussian(var):\n stddev = np.sqrt(var)\n return stats.norm(0, stddev)", "def randomPointOnSphere(r):\n x = np.random.normal()\n y = np.random.normal()\n z = np.random.normal()\n point = np.array([x, y, z])\n point *= r/(x**2 + y**2 + z**2)**.5\n return point" ]
[ "0.7677711", "0.7557377", "0.72493887", "0.724668", "0.69417983", "0.6804732", "0.67062134", "0.65800136", "0.65611786", "0.64893746", "0.62739146", "0.62513113", "0.6185571", "0.6180643", "0.6170201", "0.6118213", "0.6113022", "0.61081076", "0.60634816", "0.606164", "0.6053261", "0.6048532", "0.6046962", "0.6029117", "0.6011317", "0.6006124", "0.60031235", "0.59798676", "0.59587234", "0.5958701", "0.59559035", "0.5948708", "0.5946299", "0.59378296", "0.5930045", "0.5913742", "0.5904664", "0.58839273", "0.5879883", "0.5870295", "0.585434", "0.5854064", "0.58465976", "0.58432823", "0.58411473", "0.5835973", "0.5833082", "0.5831344", "0.5820473", "0.5805476", "0.5805476", "0.5802526", "0.5802526", "0.5802526", "0.5802526", "0.5802526", "0.5798418", "0.5797507", "0.57948613", "0.57752657", "0.57679546", "0.5755048", "0.57509124", "0.5750104", "0.57499325", "0.57324195", "0.5728836", "0.5728836", "0.57265556", "0.5712614", "0.5703898", "0.5700577", "0.56931096", "0.5690754", "0.5686362", "0.56837237", "0.56758684", "0.56675667", "0.56541044", "0.5651711", "0.56493825", "0.56474555", "0.5621236", "0.5612436", "0.56065446", "0.5597121", "0.5592989", "0.55768543", "0.5571747", "0.5565726", "0.5563343", "0.5562455", "0.5561511", "0.5557861", "0.5554791", "0.555203", "0.555203", "0.5547883", "0.55461913", "0.55445594" ]
0.70076823
4
Computes standard deviation for current generation search.
def _dispersion_spread(self, n): return pow(self.max_iters-self.iteration, n) / pow(self.max_iters, n) * \ (self.max_std - self.min_std) + self.min_std
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_std(self) -> float:\n\n if self.data:\n return np.std(self.data)\n else:\n return self.sigma", "def stddev(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n stddev = variance**(1/2)\n if sample == False:\n variance = sum(distance_squared)/(self.size)\n stddev = variance**(1/2)\n return stddev", "def _std(self, data):\n var = stats.var(data)\n if var>0.0:\n sd = math.sqrt(var)\n else:\n sd = 0.0\n return sd", "def standard_deviation(self):\r\n\t\treturn self.variance()**(1/2)", "def get_stdev(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return sqrt(cls.get_var(data, is_population))", "def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))", "def standard_deviation(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n return math.sqrt(clean.variance())", "def global_std_deviation(self):\n\n return np.std(self.average_scores_all_subjects(), axis=0)", "def std_dev(self) -> float:\n return math.sqrt(self.variance())", "def _get_standard_deviation(intermediate_normalization_dict):\n\n num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])\n multiplier = num_values / (num_values - 1)\n\n return numpy.sqrt(multiplier * (\n intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -\n intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2\n ))", "def get_stdev(self):\n var_x = numpy.var(self._x)\n var_y = numpy.var(self._y)\n return numpy.sqrt(var_x + var_y)", "def standard_deviation( values, sample=False ):\n return ma.sqrt( variance( values, sample ) )", "def std(self):\n\n return self._reduce_for_stat_function(F.stddev, only_numeric=True)", "def calc_standard_deviation(data: list) -> float:\n mean = calc_mean(data)\n acc = 0.0\n for n in data:\n acc += (n - mean) ** 2\n acc /= len(data) - 1\n return math.sqrt(acc)", "def std(self):\n stds = [(x.m-self.mean)**2 + x.std**2 for x in self.xs]\n return np.sqrt(np.dot(self.a, np.array(stds)))", "def standard_dev(self):\n return self.variance()**0.5", "def _std(self):\n\n\t\t#print opt.hess_inv.todense()\n\t\td = 1E-7\n\t\ttheta = self.theta\n\n\t\tTheta = np.copy(theta)\n\t\tTheta[0] = Theta[0] + d\n\t\taa1 = self.objfxn(tuple(Theta))\n\t\tTheta = np.copy(theta)\n\t\tTheta[0] = Theta[0] - d\n\t\taa2 = self.objfxn(tuple(Theta))\n\t\taa3 = self.objfxn(tuple(theta))\n\n\t\tself.stda = 1/np.sqrt((aa1 - 2*aa3 + aa2)/d**2)\n\n\t\tTheta = np.copy(theta)\n\t\tTheta[1] = Theta[1] + d\n\t\tbb1 = self.objfxn(tuple(Theta))\n\t\tTheta = np.copy(theta)\n\t\tTheta[1] = Theta[1] - d\n\t\tbb2 = self.objfxn(tuple(Theta))\n\t\tbb3 = self.objfxn(tuple(theta))\n\n\t\tself.stdb = 1/np.sqrt((bb1 - 2*bb3 + bb2)/d**2)\n\n\t\td = 1E-9\n\t\tTheta = np.copy(theta)\n\t\tTheta[2] = Theta[2] + d\n\t\tcc1 = self.objfxn(tuple(Theta))\n\t\tTheta = np.copy(theta)\n\t\tTheta[2] = Theta[2] - d\n\t\tcc2 = self.objfxn(tuple(Theta))\n\t\tcc3 = self.objfxn(tuple(theta))\n\n\t\tself.stdc = 1/np.sqrt((cc1 - 2*cc3 + cc2)/d**2)\n\n\t\treturn self", "def stdev(items):\n return Series.std(Series(items))", "def stddev(r):\n avg = average(r)\n sdsq = sum([(i - avg) ** 2 for i in r])\n return (sdsq / (len(r) - 1 or 1)) ** 0.5", "def std(self):\r\n return np.std(self.data_array)", "def standard_deviation(xs: List[float]) -> float:\n return math.sqrt(variance(xs))", "def standard_deviation(xs: 
List[float]) -> float:\n return math.sqrt(variance(xs))", "def std(self) -> float:\n return self._data.std()", "def calculate_std_dev(X):\n\tstd_dev = np.sqrt(calculate_variance(X))\n\treturn std_dev", "def std(self, dset):\n avg = self.mean(dset)\n variance = sum([math.pow(x - avg, 2) for x in dset])\n std = math.sqrt(variance)\n return std", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def standard_deviation(data):\n\n return np.sqrt(variance(data))", "def get_std_dev(self, data):\n mean = 0\n data_arr = []\n for i in data:\n data_arr.append(i[1])\n return statistics.stdev(data_arr)", "def get_stdev(self):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")", "def std(self):\n variance, mean = self.variance()\n standard_deviation = variance**0.5\n print(f\"Standard Deviation is: {standard_deviation}\")\n return standard_deviation, mean", "def get_stdev(self, recalc=False):\n if self.stdev is not None and not recalc:\n return self.stdev\n\n self.stdev = np.std(self.img.ravel())\n return self.stdev", "def std(self):\n\t\treturn np.sqrt(0.6) #obtained by integrating 1.5x^4 from -1 to 1", "def stdDev(data):\r\n sum = 0\r\n ave = average(data)\r\n for i in data:\r\n sum += (i-ave)**2\r\n return math.sqrt(sum/len(data))", "def std(self):\n return np.sqrt(self.var)", "def stdev(values):\n mean = avg(values)\n diffs = [(value - mean) ** 2 for value in values]\n return avg(diffs) ** 0.5", "def std_mean(self):\n std = self.std\n if self.ddof != 0:\n # ddof correction, (need copy of std)\n std = std * np.sqrt(\n (self.sum_weights - self.ddof) / self.sum_weights\n )\n\n return std / np.sqrt(self.sum_weights - 1)", "def overall_standard_deviation(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n return np.std(test_data - truth_data)", "def std_dev(L, is_sample=0):\n\treturn math.sqrt(variance(L, is_sample))", "def calculate_mean_stdev(self):\n sentences = [self.tokens_from_string(x) + ['.']\n for x in self.testing_set.split(\".\")]\n probabilities = []\n for sentence in sentences:\n # skip short sentences\n if len(sentence) <= self.order:\n continue\n\n prob = self.prob_calculate(sentence)\n probabilities.append(prob / (len(sentence) - self.order))\n\n self.mean = statistics.mean(probabilities)\n self.stdev = statistics.stdev(probabilities)", "def std(self):\n return self._summarize(lambda c: c.std)", "def _compute_global_std(self, dataset, session, limit=None):\n _dataset = dataset\n std = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray):\n std = np.std(_dataset)\n else:\n for i in range(len(dataset)):\n std += np.std(dataset[i]) / len(dataset)\n self.global_std.assign(std, session)\n return std", "def get_stdev(self):\n if self._y.mean() == 0:\n raise ValueError('invalid value of mean of y, the ratio is not computable')\n\n var = numpy.mean(self._x ** 2) * numpy.mean(1.0 / self._y ** 2) - \\\n (numpy.mean(self._x) ** 2) * (numpy.mean(1.0 / self._y) ** 2)\n return numpy.sqrt(var)", "def std(self, ddof=0, weight_by_area=True):\n return numpy.sqrt(self.var(ddof=ddof, weight_by_area=weight_by_area))", "def sd(self, dist=None):\n return np.sqrt(self.var(dist))", "def StandardDeviation(numlist):\n\tv = Variance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def varianza(self):\n\n return np.std(self.__dist_lista)", "def standard_deviation(lst):\n\tnum_items = len(lst)\n\tif num_items == 0:\n\t\treturn 
-1\n\tmean = sum(lst) / num_items\n\tdifferences = [x - mean for x in lst]\n\tsq_differences = [d ** 2 for d in differences]\n\tssd = sum(sq_differences)\n\treturn ssd", "def test_stddev(self):\n self.assertEqual(stddev(list1, sample=False), np.std(list1))\n self.assertEqual(stddev(list1), np.std(list1, ddof=1))", "def calc_stdev(a, b, c, d, e):\n mean_of_num = (a + b + c + d + e) / 5\n return (((a - mean_of_num)**2 + (b - mean_of_num)**2 + (c - mean_of_num)**2\n + (d - mean_of_num)**2 + (e - mean_of_num)**2) / 5) ** 0.5", "def pooled_standard_deviation(input_variances):\r\n # compute and return pooled standard deviation\r\n return sqrt(mean(square([float(i) for i in input_variances])))", "def sampleStandardDeviation(numlist):\n\tv = sampleVariance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def std_deviation(array):\n if not array or len(array) == 1:\n return 0\n\n average = AGGREGATES['mean_arithmetic'](array)\n variance = map(lambda x: (x-average)**2,array)\n stdev = AGGREGATES['mean_arithmetic'](variance)\n return math.sqrt(stdev)", "def std(self):\n return np.sqrt(self.alpha) / self.beta", "def sd(vals):", "def get_fitness_stdev(self):\n return self.get_fitness_stat(stdev)", "def mean_deviation(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: abs(x - _mean), self.sample))/len(self.sample)", "def standard_deviation(list):\n num_items = len(list)\n mean = sum(list) / num_items\n differences = [x - mean for x in list]\n sq_differences = [d ** 2 for d in differences]\n ssd = sum(sq_differences)\n\n\n variance = ssd / num_items\n\n sd = sqrt(variance)\n\n return sd", "def get_std(self):\n std = 2 * np.sqrt(np.diag(np.dot(self._Phi.T, np.dot(self._sigma_W, self._Phi))))\n return std", "def calc_std_deviation(average):\r\n sqr_sum = 0\r\n count = len(records)\r\n for i in records:\r\n value = int(i[i.find(',')+1:])\r\n sqr_sum+=(value-average)**2 \r\n std_deviation = math.sqrt(sqr_sum/count)\r\n return std_deviation", "def stdev(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.std(0)\n\tstd_values=mean_values.tolist()\n\treturn std_values", "def get_std(self):\r\n cov = self.to_sparse().diagonal()\r\n std = np.sqrt(cov)\r\n return pd.Series(std, index=self.data.index, name=\"STD\")", "def std(self):\n return self.data.std(axis=-1, keepdims=True)", "def calculate_std(self, _moves, s, att):\n # std = 0\n # for move in _moves:\n # std += move.__dict__[att].__dict__[s] ** 2\n # std -= self.mean_instance.__dict__[att].__dict__[s] ** 2\n # std /= len(_moves)\n # std = np.sqrt(std)\n move_array = []\n for move in _moves:\n move_array.append(move.__dict__[att].__dict__[s])\n move_array = np.array(move_array)\n std = move_array.std(axis=0)\n return std", "def std_run_time(self) -> float:\n return float(self.result_array.sum(axis=0).std())", "def calc_std(sig):\n return np.std(sig)", "def get_std_dev(data, n = -1):\n mean = get_mean(data, n =n)\n\n deviations = []\n\n for i in range(0,n):\n deviations.append( (data[i] - mean)**2 )\n\n std_dev = sqrt( sum(deviations)/n )\n\n return std_dev", "def get_sterr(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n from math import sqrt\n return cls.get_stdev(data, is_population) / sqrt(cls.get_n(data))", "def _gsd(self, df):\n\n # calculate the GM in units of microns\n gm = df['GM'] * 1e-3\n\n # drop GM. 
At this point, the row will have the \n # histogram and nothing else\n df = df.drop(\"GM\", axis=1)\n\n # calculate the geometric standard deviation\n gsd = np.exp(\n np.sqrt(\n df\n .mul(\n (df*0)\n .add(np.log(self.midpoints), axis=1)\n .sub(np.log(gm), axis=0)\n **2\n )\n .sum(axis=1)\n / (df.sum(axis=1))\n )\n )\n\n return gsd", "def deviation(xs):\n\ta = avg(xs)\n\treturn sqrt(sum([(x - a) ** 2 for x in xs]) / (len(xs) - 1))", "def std_dev(l):\n return variance(l)**.5", "def calc_sq_std(df):\n\n sq_std = df.dropna()\n\n sq_std = (df['std'].divide(df['mean']))**2\n\n sq_std.name = 'sq_std'\n\n sq_std = pd.DataFrame(sq_std)\n\n sq_std = sq_std.dropna()\n\n return sq_std", "def standard_deviation_over(individual, test_data, truth_data, name=None, tolerance=0):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n over_predicted_inds = get_over_predicted_inds(test_data,\r\n truth_data,\r\n tolerance)\r\n test_subset = test_data[over_predicted_inds]\r\n truth_subset = truth_data[over_predicted_inds]\r\n return overall_standard_deviation(individual, test_subset, truth_subset)", "def _compute_global_std(self, dataset, session, limit=None):\n _dataset = dataset\n std = 0.\n if isinstance(limit, int):\n _dataset = _dataset[:limit]\n if isinstance(_dataset, np.ndarray) and not self.global_std_pc:\n std = np.std(_dataset)\n else:\n for i in range(len(dataset)):\n if not self.global_std_pc:\n std += np.std(dataset[i]) / len(dataset)\n else:\n std += (np.std(dataset[i], axis=(0, 1),\n keepdims=True) / len(dataset))[0][0]\n self.global_std.assign(std, session)\n return std", "def get_std(self):\n std_value = self.df[self.col_name].std()\n return std_value", "def build_std(self):\n param = self.param\n meansp = self.mean()\n stdsp = self.std()\n num_noise = 200\n noise = np.random.normal(1,0.005,(num_noise,self.wvl.size)) # add 0.5% variance to signal at all wavelengths\n # should be at every sp in utc, but for now, use mean sp\n sp_arr = meansp*noise\n #import code; code.interact(local=locals())\n par_noisy = np.array(list(map(lambda tt:param(sp_arr[tt,:],self.wvl),xrange(num_noise))))\n notaxis = tuple(np.where(par_noisy.shape != self.npar)[0])\n stdpar = np.nanstd(par_noisy,axis=notaxis)\n self.stdpar = stdpar\n return stdpar", "def em_std(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. 
No empirical standard deviation.')\n return math.sqrt(\n (self.__sum_of_square_reward -\n self.__total_rewards**2 / self.__total_pulls) / self.__total_pulls)", "def stddev(data, ddof=0):\n n = len(data)\n if n < 2:\n return 0\n ss = _ss(data)\n pvar = ss/(n-ddof)\n return pvar**0.5", "def std(self):\n if self.dirty:\n self._finalize()\n if self.vvar is None:\n return 0\n else:\n return math.sqrt(self.vvar)", "def stddev(self, num_list):\n try:\n mean = self.average(num_list)\n\n minus_mean = []\n\n for number in num_list:\n try:\n minus_mean.append((number - mean) ** 2)\n except Exception as e:\n print(\"Error: \", e)\n\n meany_mean = self.average(minus_mean)\n\n meany_mean = meany_mean ** .5\n\n except Exception as e:\n print(\"Error: \", e)\n\n return meany_mean", "def divide_by_std_across_trials(self):\n if not hasattr(self, 'mean_across_trials_subtracted_data'):\n self.subtract_mean_across_trials()\n self.std_across_trials_divided_data = \\\n self.mean_across_trials_subtracted_data / \\\n np.std(self.mean_across_trials_subtracted_data,\n axis=1, keepdims=True)", "def std(self) -> \"Stream[float]\":\n return self.agg(lambda x: np.std(x, ddof=1)).astype(\"float\")", "def std_dev(list_num):\n\n # Calculate the mean of the numbers\n mean = sum(list_num)/len(list_num)\n\n # Initialise a variable to hold the sum of the squared distance to the mean\n sum_sqrd_dist = 0\n \n # Iterate over the numbers\n for num in list_num:\n # Subtract the mean from the number and square the result\n sqrd_dist = (num - mean)**2\n # Add the number to the sum of the squared distances \n sum_sqrd_dist = sum_sqrd_dist + sqrd_dist\n\n # return the square root of the sum of squared distances divided by the length of the list\n return (sum_sqrd_dist/len(list_num))**(1/2)", "def sd(self, iterations = None):\n # \n if iterations == None :\n iterations = self.imax\n #\n for i in range(iterations):\n #\n # go in the direction of steepest descent\n d = -self.df(self.x)\n #\n # perform a line search of f along d\n x, status = self.line_search(self.x, d)\n #\n # if status is False then the local curvature is a maximum\n # just do a fixed stepsize in this case\n if status == False :\n self.x = self.x + d\n else :\n self.x = x\n #\n # calculate the error\n self.errors.append(self.f(self.x))\n self.iters = self.iters + 1\n if self.iters > self.imax or (self.errors[-1] < self.e_tol):\n break\n #\n return self.x", "def F_std(d, N):\n # memoize specht() and weyl() results (but only for current call)\n specht_mem, weyl_mem = memoize(specht), memoize(weyl)\n\n return sum(\n d ** (-N - 2)\n * sum(sqrt(specht_mem(mu) * weyl_mem(d, mu)) for mu in box_added(alpha, d)) ** 2\n for alpha in Partitions(n=N - 1, max_length=d)\n )", "def stdev_from_mean(x):\r\n x = array(x)\r\n return (x - mean(x)) / std(x)", "def getSTD(self, field):\n return np.std([self.fitnesses[i][field] for i in range(len(self.fitnesses))])", "def std_ddof(self, ddof=0):\n return np.sqrt(self.var_ddof(ddof=ddof))", "def calculate_std(self):\n # from 6:00 to 1:00\n start = (6-4)*60*60 / self.interval_length_s - self.intervals_offset\n end = (25-4)*60*60 / self.interval_length_s - self.intervals_offset\n v = [] # this is for the 24h consumptions\n v2 = [] # this is for the std of the interval consumptions\n for i in range(start,end): # i: end-time of a day-interval / everything between 6:00 and 1:00\n for i1 in range(i,len(self.intervals)-1,self.intervals_per_day): # check all possible end-times (skip the very last interval)\n if i1 >= 0:\n i0 = i1 - 
self.intervals_per_day # i0: start of the day-interval\n if i0 >= 0: # within measured time?\n v.append(sum(self.intervals[i0:i1])) # 24h consumption\n cmp_interval = self.cmp_interval(i1)\n d = self.interval_consumption2power(self.intervals[i1] - self.consumption_per_interval_smoothed[cmp_interval]) # in W!\n v2.append(d*d)\n if len(v) > 5:\n self.std = np.std(v)\n if len(v2) > 5:\n self.std_intervals = sqrt(np.mean(v2))", "def mean_stddev(self):\n if len(self.vs) == 0:\n raise StdDevFilterException\n\n mx = self.mean()\n # compute variance\n variance = sum([(x - mx)**2 for x in self.vs])/len(self.vs)\n # return mean value and standard deviation (square root of variance)\n return mx,math.sqrt(variance)", "def test_stdev_from_mean(self):\r\n x = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = stdev_from_mean(x)\r\n self.assertFloatEqual(\r\n result,\r\n [-1.292463399014413,\r\n -0.60358696806764478,\r\n -0.045925095396451399,\r\n 0.77416589382589174,\r\n 1.1678095686526162])", "def deviation(values, val):\n\tm = mean(values)\n\tdev = abs(val-m)\n\tsd = standard_deviation(values)\n\treturn float(dev)/sd if sd!=0 else 0.0", "def std(x):\n return sqrt(TinyStatistician.var(x))", "def stddev(std_numbers):\n mean = sum(std_numbers) / float(len(std_numbers))\n sum_std = 0.0\n\n for x in std_numbers:\n sum_std += (mean - x) * (mean - x)\n\n variance = sum_std / float(len(std_numbers))\n stddev = math.sqrt(variance)\n\n return stddev", "def get_mean_stddev(self):\n return self.get_mean(), self.get_std_dev()", "def stdev(data, xbar=None):\n return math.sqrt(variance(data, xbar))", "def avg_std_dev(positions):\n # print(\"len pos = \", len(positions))\n # print(positions)\n if sum(positions) == 0:\n the_mean = 0\n standard_dev = 0\n return the_mean, standard_dev \n try:\n the_mean = sum(positions) / float(len(positions))\n standard_dev = numpy.std(positions)\n except ValueError:\n the_mean = 0\n standard_dev = 0\n return the_mean, standard_dev", "def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def GetStandardDeviation(vals_l, mean):\n\n\n sum_deviations_squared = 0\n\n for x in vals_l:\n sum_deviations_squared += (x - mean)**2\n\n return math.sqrt(float(sum_deviations_squared)/float(len(vals_l)))", "def standard_deviation(scores):\n num_scores = len(scores)\n if num_scores == 0: return 0\n\n mean_score = mean(scores, False)\n sum_x2 = sum(score**2 for score in scores)\n std_dev_score = (sum_x2/num_scores - mean_score ** 2) ** 0.5\n return round(std_dev_score, 2)", "def lsamplestdev (inlist):\r\n return math.sqrt(samplevar(inlist))" ]
[ "0.7485085", "0.73712903", "0.73565", "0.72915614", "0.7247638", "0.7231712", "0.72311544", "0.7163135", "0.7097697", "0.70636594", "0.70502454", "0.7017676", "0.7010243", "0.6961241", "0.6959725", "0.69366497", "0.6936612", "0.6928692", "0.6888086", "0.68561345", "0.6811737", "0.6811737", "0.67981315", "0.6796534", "0.6779959", "0.6777555", "0.6777555", "0.6765702", "0.6762304", "0.67588985", "0.67522615", "0.67373455", "0.6734263", "0.670396", "0.66794723", "0.6674129", "0.6654", "0.6643958", "0.66430134", "0.6618478", "0.6615834", "0.6601562", "0.65986675", "0.6597651", "0.65901226", "0.65831137", "0.6582219", "0.65693974", "0.6569154", "0.6566945", "0.65659237", "0.6565106", "0.65623564", "0.6559682", "0.65459865", "0.6520676", "0.65188426", "0.6509265", "0.64988697", "0.64951944", "0.64666575", "0.64616954", "0.64371717", "0.6433107", "0.6429934", "0.6409799", "0.6404507", "0.64035404", "0.64017165", "0.64013076", "0.6400281", "0.63922405", "0.6391606", "0.6376818", "0.63733107", "0.6370705", "0.6355515", "0.63455856", "0.6331944", "0.63284546", "0.6307314", "0.6300044", "0.6298085", "0.62940526", "0.62868375", "0.6284708", "0.62811255", "0.62725145", "0.6270411", "0.62655705", "0.62611204", "0.62480974", "0.62471735", "0.6242884", "0.62309647", "0.6226043", "0.62255687", "0.6212322", "0.6206364", "0.61991465", "0.61973953" ]
0.0
-1
Checks that the solution vector is within optimal problem's boundaries.
def _check(self, vector): for i, elmt in enumerate(vector): # checks lower bound if (elmt < self.lower[i]): vector[i] = self.lower[i] # checks upper bound elif (elmt > self.upper[i]): vector[i] = self.upper[i] return vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feasible(self):\n return self.lowBound <= self._value <= self.upBound", "def in_box_bounds(self, test_vec):\n above_min = np.greater(test_vec, self.lower_vertex).all()\n below_max = np.greater(self.upper_vertex, test_vec).all()\n return above_min and below_max", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def _checkImprovedSolution(self, vertex):\n if self._bestPathVertex.isSolution():\n solWayPoint = sys.maxint\n else:\n solWayPoint = self._bestPathVertex.getNextWaypoint().getIndex()\n\n if vertex.isSolution():\n vxWayPoint = sys.maxint\n else:\n vxWayPoint = vertex.getNextWaypoint().getIndex()\n\n # If waypoints are the same (possibly both at final waypoint) then we compare priority which is a better indicator than admissible.\n if solWayPoint == vxWayPoint:\n return self._bestPathVertex.getTimeThroughHeuristic() > vertex.getTimeThroughHeuristic()\n\n return solWayPoint < vxWayPoint", "def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False", "def test_velocity_boundaries(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_almost_equal(self.v_box(self.t, 0), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.v_box(self.t, L_x), 0, decimal=4)", "def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True", "def _check_bound(self, q):\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n inside_bound = np.all(self._end_effector_low <= xyz) and np.all(xyz <= self._end_effector_high)\n inside_buffer_bound = (np.all(self._end_effector_low + self._box_bound_buffer <= xyz) and \\\n np.all(xyz <= self._end_effector_high - self._box_bound_buffer))\n return inside_bound, inside_buffer_bound, mat, xyz", "def check_bounds(self, index):\n if index < self.lower_bound or index > self.upper_bound:\n return False\n return True", "def test_v_bounds(self):\n n = 50\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n G.i_ext_init = np.linspace(-1.0, 1.0, n)\n\n class BoundsChecker(object):\n def __init__(self, target):\n self.target = target\n self.small = None\n self.large = None\n self.order = 1\n\n def evolve(self, t, dt):\n small = np.min(self.target.v)\n large = np.max(self.target.v)\n if self.small is None or self.small > small:\n self.small = small\n if self.large is None or self.large < large:\n self.large = large\n \n M = BoundsChecker(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n self.assertLess(M.large, G.v_th)", "def is_legal_solution(self, solution):\r\n if self.sorting_order is ScoresSortingOrder.ASCENDING:\r\n return self.fit_score(solution) == 0\r\n else:\r\n return self.fit_score(solution) == sum(x for x in range(1, 12))", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i in 
range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def isSolInfeasible(self, x : pd.Series):\n\n #\n # respect solution checker output, if it exists\n #\n if x.get(Key.SolCheckerRead) is not None:\n if not pd.isnull(x.get(Key.SolCheckerRead)) and x.get(Key.SolCheckerRead):\n if not pd.isnull(x.get(Key.SolCheckerFeas)) and x.get(Key.SolCheckerFeas):\n return False\n else:\n return True\n\n\n\n # compute the maximum violation of constraints, LP rows, bounds, and integrality\n maxviol = max((x.get(key, 0.0) for key in [Key.ViolationBds, Key.ViolationCons, Key.ViolationInt, Key.ViolationLP]))\n\n # if no violations have been recorded, no solution was found, and the solution is not infeasible.\n if pd.isnull(maxviol):\n return False\n\n return maxviol > self.feastol", "def _guess_firm_size_upper_too_low(self, bound, tol):\n theta = self.ivp.y[1]\n return abs(theta - bound) / theta <= tol # use relative values!", "def isInRange(val, minv, maxv):\n\treturn val >= minv and val <= maxv", "def out_of_bounds(self):\n return self.rect.right <= 0", "def has_solution(self) -> bool:\n pass", "def _check_init_solution(self):\r\n B = np.array([self._A[:, j] for j in self._basic_vars]).transpose()\r\n self._B_inv = np.linalg.inv(B)\r\n x_B = self._B_inv @ self._b\r\n for x in x_B:\r\n if x < 0:\r\n raise AssertionError(\"Initial solution is not feasible!\")", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def checkVertices(vertices, limits):\n isWithin = True\n for i,v in enumerate(vertices):\n x = v[0]\n y = v[1]\n z = v[2]\n if x < limits[0][0] or x > limits[0][1]:\n isWithin = False\n break\n if y < limits[1][0] or y > limits[1][1]:\n isWithin = False\n break\n if z < limits[2][0] or z > limits[2][1]:\n isWithin = False\n break\n return isWithin", "def isFeasible(self):\n if self.function.constraints(self.position[0],self.position[1]) == False:\n self.position = np.array([random.uniform(-50,50), random.uniform(-50,50)]) \n self.velocity = np.array([random.uniform(-1,1), random.uniform(-1,1)])", "def within_tolerance(a_vec, b_vec, tol_vec):\n\tfor a, b, tol in zip(a_vec, b_vec, tol_vec):\n\t\tif abs(a - b) > tol:\n\t\t\treturn False\n\treturn True", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def test_constraint_satisfaction(self, velocity_pc_data):\n data, pc = velocity_pc_data\n path, ss, vlim = data\n\n constraint_param = pc.compute_constraint_params(path, ss)\n _, _, _, _, _, _, xlimit = constraint_param\n\n qs = path(ss, 1)\n N = ss.shape[0] - 1\n\n sd = cvx.Variable()\n\n for i in range(0, N + 1):\n # 2. 
Compute max sd from the data\n constraints = [qs[i] * sd <= vlim[:, 1],\n qs[i] * sd >= vlim[:, 0],\n sd >= 0, sd <= JVEL_MAXSD]\n prob = cvx.Problem(cvx.Maximize(sd), constraints)\n try:\n prob.solve(solver=cvx.ECOS, abstol=1e-9)\n xmax = sd.value ** 2\n\n prob = cvx.Problem(cvx.Minimize(sd), constraints)\n prob.solve(solver=cvx.ECOS, abstol=1e-9)\n xmin = sd.value ** 2\n except cvx.SolverError:\n continue\n\n # 3. They should agree\n npt.assert_allclose([xmin, xmax], xlimit[i], atol=SMALL)\n\n # Assert non-negativity\n assert xlimit[i, 0] >= 0", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.to_grid == self.from_grid", "def _check_bounds(self, x_new):\n\n # If self.bounds_error is True, we raise an error if any x_new values\n # fall outside the range of x. Otherwise, we return an array indicating\n # which values are outside the boundary region.\n below_bounds = x_new < self.x[0]\n above_bounds = x_new > self.x[-1]\n\n # !! Could provide more information about which values are out of bounds\n if self.bounds_error and below_bounds.any():\n raise ValueError(\"A value in x_new is below the interpolation \"\n \"range.\")\n if self.bounds_error and above_bounds.any():\n raise ValueError(\"A value in x_new is above the interpolation \"\n \"range.\")\n\n # !! Should we emit a warning if some values are out of bounds?\n # !! matlab does not.\n out_of_bounds = logical_or(below_bounds, above_bounds)\n return out_of_bounds", "def check(self, parameters):\n if np.any(parameters < self._lower):\n return False\n if np.any(parameters > self._upper):\n return False\n return True", "def check_cl_constraints(vector):\n\tglobal __cl_constraints\n\n\tfor con in __cl_constraints:\n\t\t# a vector is not allowed to hold for both\n\t\tif vector[con[0]] == 1 and vector[con[1]] == 1:\n\t\t\treturn False\n\treturn True", "def success(self, x, tol=1.e-5):\n val = self.fun(asarray(x))\n if abs(val - self.fglob) < tol:\n return True\n\n # the solution should still be in bounds, otherwise immediate fail.\n if np.any(x > np.asfarray(self.bounds)[:, 1]):\n return False\n if np.any(x < np.asfarray(self.bounds)[:, 0]):\n return False\n\n # you found a lower global minimum. 
This shouldn't happen.\n if val < self.fglob:\n raise ValueError(\"Found a lower global minimum\",\n x,\n val,\n self.fglob)\n\n return False", "def violations(robot, q):\n ub = np.array(variable_dict_to_list(robot.ub))\n lb = np.array(variable_dict_to_list(robot.lb))\n ja_violations = (q > ub) | (q < lb)\n return any(ja_violations)", "def test_viable(self,outs):\n \n viable = True\n for i,temp_i in enumerate(outs):\n if (temp_i <= self.mins[i+4]):\n viable = False\n elif (temp_i >= self.maxes[i+4]): \n viable = False\n return viable", "def has_bounds(self):\r\n bounds = self.bounds\r\n if bounds in (None, [None, None]):\r\n return False\r\n for i in xrange(bounds[0]):\r\n if bounds[0][i] is not None and bounds[0][i] > -np.inf:\r\n return True\r\n for i in xrange(bounds[1]):\r\n if bounds[1][i] is not None and bounds[1][i] < np.inf:\r\n return True\r\n return False", "def is_solution(self, csp):\n return self.is_consistent(csp.get_constraints()) and self.is_complete(csp.get_variables())", "def is_solved(self):\n return self._start == self._target", "def checkInBound(self,value,checkEdge):\n assert(checkEdge==0 or checkEdge==1)\n if checkEdge==0: # width\n assert(value>=0 and value<self.w)\n else:\n assert(value>=0 and value<self.h)", "def check_ml_constraints(vector):\n\tglobal __ml_constraints\n\n\tfor con in __ml_constraints:\n\t\t# a vector must be either contained or missing for both instances *con* tuple\n\t\tif not (vector[con[0]] == vector[con[1]]):\n\t\t\treturn False\n\treturn True", "def ok(self, solution):\n if self.constraints is not None:\n for constraint in self.constraints:\n if not constraint(solution):\n return False\n return True", "def _check_convergence(current_position,\n next_position,\n current_objective,\n next_objective,\n next_gradient,\n grad_tolerance,\n f_relative_tolerance,\n x_tolerance):\n grad_converged = _check_within_tolerance(next_gradient, grad_tolerance)\n x_converged = _check_within_tolerance(next_position - current_position,\n x_tolerance)\n f_converged = _check_within_tolerance(\n next_objective - current_objective,\n f_relative_tolerance * current_objective)\n return grad_converged | x_converged | f_converged", "def check_convexity(hull, used_pivots):\n for instance in used_pivots:\n if not check_inside_hull(hull, instance):\n return False\n return True", "def check_limits(self):\n\n #Find the relative position of each leg vs. 
its \"zero\" position\n relpos = self.fixed_plate - self.fixed_plate_zero\n\n for leg in range(3):\n #Check that the leg is within allowable \"safe zone\"\n #Use the position of the leg (relative to 0) to find the index in the \"safe zone\" matrix\n i_x = nearest_index(self.leg_safe_xaxis, relpos[COORD_X, leg])\n i_z = nearest_index(self.leg_safe_zaxis, relpos[COORD_Z, leg])\n #Look up in the safe zone.\n self.leg_fault[leg] = (not self.leg_safe_zone[leg, i_x, i_z])\n\n if (not all(np.isreal(self.fixed_plate[:, leg]))) or any(np.isnan(self.fixed_plate[:, leg])):\n #A complex or NaN value = the angle found for the leg was invalid, meaning that the\n #leg would have to be longer to reach the desired position.\n self.leg_fault[leg] = True", "def _check_approx_fixed_point(V_current, V_previous, tol):\n\n # Compute the sup norm between `V_current` and `V_previous`\n sup_norm = np.max(np.abs(V_current - V_previous))\n\n # Algorithm termination condition\n fp = sup_norm <= tol\n\n return fp, sup_norm", "def isFeasible(self, A):\n\t\treturn False", "def _isInAllowedRange( self, testval, refval, reltol=1.e-2 ):\n denom = refval\n if refval == 0:\n if testval == 0:\n return True\n else:\n denom = testval\n rdiff = (testval-refval)/denom\n del denom,testval,refval\n return (abs(rdiff) <= reltol)", "def __is_valid(self, subscript):\n return ((0,0) <= subscript and subscript < self.size)", "def test_out_of_bounds(oob_from, oob_to):\n with pytest.raises(ValueError):\n haversine_vector([oob_from], [oob_to])\n with pytest.raises(ValueError):\n haversine_vector([oob_from], [oob_to], normalize=False)", "def box_valid(self):\n return ((self.lt.x >= 0)\n and (self.lt.y >= 0)\n and (self.rb.x >= self.lt.x)\n and (self.rb.y >= self.lt.y))", "def is_solved(self):\n self.solved = self.current_pos == self.finish_pos\n return self.solved", "def check_solvability(self, state):\n\n inversion = 0\n for i in range(len(state)):\n for j in range(i, len(state)):\n if state[i] > state[j] != 0:\n inversion += 1\n\n return inversion % 2 == 0", "def isViable() -> bool:\n if I != 0:\n return True\n return False", "def is_converged(old_vector, new_vector, threshold=0.0000001):\n diff = new_vector.points - old_vector.points\n #diff = numpy.power(diff, 2)\n diff = diff**2\n maximum = numpy.max(diff)\n\n #print \"new vector: \", new_vector.as_vector()\n #print \"old vector: \", old_vector.as_vector()\n #print \"new difference: \", maximum\n #print \"*\" * 30\n\n #a = raw_input(\"Dalje?\")\n\n if maximum > threshold:\n return False\n else:\n return True", "def testoptdone(self):\r\n assert self.data.optdone\r\n target_e, target_g, target_s = self.data.geotargets\r\n value_e, value_g, value_s = self.data.geovalues[-1]\r\n converged = (value_e < target_e and value_g < target_g) or (value_g < target_g and value_s < target_s)\r\n assert converged", "def check_bounds(x, param_name):\n for i in range(len(x)):\n if ((xmin[param_name][i] is not None and x[i] < xmin[param_name][i]) or\n (xmax[param_name][i] is not None and x[i] > xmax[param_name][i])):\n return False\n return True", "def _test_expected_error_bound(bellman_error_margin, optimal_res, test_res, mdp):\n test_eval = test_res.policy.evaluate_on(mdp)\n test_occ = test_eval.state_occupancy #occupancy from start state\n test_start_steps = sum(test_occ.values())\n value_diff = test_res.initial_value - optimal_res.initial_value\n within_bound = 0 <= value_diff <= bellman_error_margin*test_start_steps\n if not within_bound:\n raise OutOfExpectedErrorBound\n return 
bellman_error_margin*test_start_steps", "def isSolvableVect(a, b, c):\n return all(isSolvableBool(ai, bi, ci) for (ai, bi, ci) in zip(a, b, c))", "def has_solution(self) -> bool:\n if self in [self.SATISFIED, self.ALL_SOLUTIONS, self.OPTIMAL_SOLUTION]:\n return True\n return False", "def check_constrained(self, theta):\n #initially no flips\n sign = np.ones_like(theta)\n oob = True #pretend we started out-of-bounds to force at least one check\n #print('theta_in ={0}'.format(theta))\n while oob:\n above = theta > self.upper\n theta[above] = 2*self.upper[above] - theta[above]\n sign[above] *= -1\n below = theta < self.lower\n theta[below] = 2*self.lower[below] - theta[below]\n sign[below] *= -1\n oob = np.any(below | above)\n #print('theta_out ={0}'.format(theta))\n return theta, sign, oob", "def boundary_checker(stage, player_new):\n # Go through each possible direction a player can travel\n if player_new[0] == 0:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[1] == 0:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[0] > stage[0]:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[1] > stage[1]:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n # Flag validity if player still within bounds of map\n else:\n valid = True\n\n return valid", "def checkWithinBound(rowWithinBound,colWithinBound):\n if(rowWithinBound == 0 and colWithinBound == 0):\n return True\n else:\n return False", "def testoptdone(self):\r\n assert self.data.optdone\r\n convergence = numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets\r\n assert sum(convergence) >= 2", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n return True", "def is_solved(self) -> bool:\n return set(self.boxes) == set(self.storage_locations)", "def is_approximated_solution(p):\n tr = float(p[\"optThreshold\"])\n # TODO: finish\n k = \"result.best.verificationDecision\"\n return p[\"result.best.isOptimal\"] == \"true\" and p[k] == \"unsat\"", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def within_limits(self, pos=None, pypslog=False):\n hilim = self.get_hilim()\n lolim = self.get_lowlim()\n if pos is None:\n pos = self.wm()\n pos_ok = lolim < pos < hilim\n msg = \"Position {0} outside of limits {1} and {2} for motor {3} (pv {4})\"\n if not pos_ok:\n error = msg.format(pos, lolim, hilim, self.name, self.pvname)\n if pypslog:\n logprint(error)\n else:\n print error\n return pos_ok", "def out_of_bounds(self):\n return self._parms.get(\"out_of_bounds\")", "def check_termination(self):\r\n \r\n # First check if we are doing termination based on running time\r\n if 
(self.options.time_limit):\r\n self.time = time.clock - self.time_start\r\n if (self.time >= self.options.maxtime):\r\n self.term_reason = 'Exceeded time limit'\r\n return\r\n \r\n # Now check if we are doing break by tolx\r\n if (self.options.use_tolx):\r\n if (np.sqrt(cua.dot(self.dx,self.dx).get())/\r\n np.sqrt(cua.dot(self.oldx,self.oldx).get()) < self.options.tolx):\r\n self.term_reason = 'Relative change in x small enough'\r\n return\r\n \r\n # Are we doing break by tolo (tol obj val)\r\n if (self.options.use_tolo and self.iter > 2):\r\n delta = abs(self.obj-self.oldobj)\r\n if (delta < self.options.tolo):\r\n self.term_reason ='Relative change in objvalue small enough'\r\n return\r\n\r\n # Check if change in x and gradient are small enough\r\n # we don't want that for now\r\n# if (np.sqrt((cua.dot(self.dx,self.dx).get())) < self.options.tolx) \\\r\n# or (np.sqrt(cua.dot(self.dg,self.dg).get()) < self.options.tolg):\r\n# self.term_reason = '|x_t+1 - x_t|=0 or |grad_t+1 - grad_t| < 1e-9'\r\n# return\r\n \r\n # Finally the plain old check if max iter has been achieved\r\n if (self.iter >= self.options.maxiter):\r\n self.term_reason = 'Maximum number of iterations reached'\r\n return\r\n \r\n # KKT violation\r\n if (self.options.use_kkt):\r\n if np.abs(np.sqrt(cua.dot(self.x,self.grad).get())) <= options.tolk:\r\n self.term_reason = '|x^T * grad| < opt.pbb_gradient_norm'\r\n return\r\n \r\n # Gradient check\r\n if (self.options.use_tolg):\r\n nr = cua.max(cua.fabs(self.grad)).get();\r\n if (nr < self.options.tolg):\r\n self.term_reason = '|| grad ||_inf < opt.tolg'\r\n return\r\n \r\n # No condition met, so return false\r\n self.term_reason = 0;", "def in_range_if_outside(self, nanobot):\n nearest_point_on_cube = []\n for axis in range(3):\n c = nanobot.coord[axis]\n if c < self.mins[axis]:\n nearest_point_on_cube.append(self.mins[axis])\n elif c > self.maxs[axis]:\n nearest_point_on_cube.append(self.maxs[axis])\n else:\n nearest_point_on_cube.append(c)\n\n return manhattan_dist(nearest_point_on_cube, nanobot.coord) <= nanobot.r", "def detect_in_bounds(self):\n creature_x, creature_y = self.creature.current_location\n if creature_x < 0 or creature_x >= self.world_width\\\n or creature_y < 0 or creature_y >= self.world_height:\n print('The creature is out of bounds!')\n return False\n return True", "def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()", "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n return False\n\n # Trick to keep DRY code: replace each value temporarily with a\n # 0, and use valid_square method with original value to determine\n # if every square is valid\n self.puzzle[row][col] = 0\n valid = self.valid_square(row, col, val)\n self.puzzle[row][col] = val\n \n # If not a valid value for square, return False\n if not valid:\n return False\n return True", "def evaluate_objective(x):\n\n x_points_cartesian = x_to_cartesian(x)\n hull = ConvexHull(x_points_cartesian)\n\n # Return the negative value because the optimization is a minimization\n return -hull.volume", "def check_constrained(self, theta):\n\n #initially no flips\n sign = np.ones_like(theta)\n oob = True #pretend we started out-of-bounds to force at least one check\n #print('theta_in 
={0}'.format(theta))\n while oob:\n above = theta > self.upper\n theta[above] = 2*self.upper[above] - theta[above]\n sign[above] *= -1\n below = theta < self.lower\n theta[below] = 2*self.lower[below] - theta[below]\n sign[below] *= -1\n oob = np.any(below | above)\n #print('theta_out ={0}'.format(theta))\n return theta, sign, oob", "def check_constrained(self, limit=None):\n\n # Set the 'well-constrained' limit at 10% (arbitrary) if not provided.\n limit = (Decimal(0.1) if not limit else Decimal(limit))\n\n if is_empty(self.value) or is_empty(self.uncertainty):\n return False\n elif self.uncertainty > (Decimal(self.value) * Decimal(limit)):\n self.well_constrained = False\n else:\n self.well_constrained = True", "def check_map_obstacle_has_sight(self):\n return self.map_obstacle.sight_range > 0", "def isSolFeasible(self, x : pd.Series):\n #\n # respect solution checker output, if it exists\n #\n if x.get(Key.SolCheckerRead) is not None:\n #\n # if this column is not None, the solution checker output exists for at least some of the problems\n # such that it is reasonable to assume that it should exist for all parsed problems\n #\n # recall that we explicitly assume that there has been a solution reported when this function is called\n # if the solution checker failed to read in the solution, or the solution checker crashed and did\n # not report the result of the check command, the solution was most likely infeasible.\n #\n if not pd.isnull(x.get(Key.SolCheckerRead)) and x.get(Key.SolCheckerRead):\n if not pd.isnull(x.get(Key.SolCheckerFeas)) and x.get(Key.SolCheckerFeas):\n return True\n else:\n return False\n else:\n return False\n\n # compute the maximum violation of constraints, LP rows, bounds, and integrality\n maxviol = max((x.get(key, 0.0) for key in\n [Key.ViolationBds, Key.ViolationCons, Key.ViolationInt, Key.ViolationLP]))\n\n return maxviol <= self.feastol", "def out_of_bounds(self):\n return self.rect.right <= 0 or self.rect.left >= self.screen_rect.width", "def inside_limits(self, point):\n if not self.regions:\n # Use rectangle check\n lat, lon = point.latitude, point.longitude\n if (lon > self.limits[0] and lat > self.limits[1] and\n lon < self.limits[2] and lat < self.limits[3]):\n return True\n else:\n return False\n else:\n # Check inside all possible regions\n p = Point((point.longitude, point.latitude))\n print(p, point)\n # import IPython; IPython.embed()\n for name, poly in self.regions.items():\n # if poly.contains(p):\n if p.intersects(poly):\n return name\n return False", "def is_exceptional(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_out(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def voltageHighEnough(self, Vm):\n if Vm > 230 * 0.88:\n return True\n else:\n return False", "def in_field(self, vec):\n return (abs(vec[0]) + abs(vec[1]) + abs(vec[2])) <= 2 * self.n", "def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n 
else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False", "def is_constrained(value, min_acceptable=None, max_acceptable=None):\n if min_acceptable is not None and value < min_acceptable:\n return False\n if max_acceptable is not None and value > max_acceptable:\n return False\n return True", "def inBounds(self,pos):\n return ((pos.x<WIDTH) & (pos.x>=0) & (pos.y<HEIGHT) & (pos.y>=0))", "def check_answer(desired, guessed, error_margin):\n return (desired < guessed + error_margin and desired > guessed - error_margin)", "def is_inside(inner_path, outer_path):\r\n if not hasattr(inner_path, 'bounding_box'):\r\n inner_path.bounding_box = CutPlanner.bounding_box(inner_path)\r\n if not hasattr(outer_path, 'bounding_box'):\r\n outer_path.bounding_box = CutPlanner.bounding_box(outer_path)\r\n if outer_path.bounding_box[0] > inner_path.bounding_box[0]:\r\n # outer minx > inner minx (is not contained)\r\n return False\r\n if outer_path.bounding_box[1] > inner_path.bounding_box[1]:\r\n # outer miny > inner miny (is not contained)\r\n return False\r\n if outer_path.bounding_box[2] < inner_path.bounding_box[2]:\r\n # outer maxx < inner maxx (is not contained)\r\n return False\r\n if outer_path.bounding_box[3] < inner_path.bounding_box[3]:\r\n # outer maxy < inner maxy (is not contained)\r\n return False\r\n if outer_path.bounding_box == inner_path.bounding_box:\r\n if outer_path == inner_path: # This is the same object.\r\n return False\r\n if not hasattr(outer_path, 'vm'):\r\n outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])\r\n vm = VectorMontonizer()\r\n vm.add_cluster(outer_path)\r\n outer_path.vm = vm\r\n for i in range(101):\r\n p = inner_path.point(i / 100.0, error=1e4)\r\n if not outer_path.vm.is_point_inside(p.x, p.y):\r\n return False\r\n return True", "def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True", "def is_solved(self):\n if not self._find_empty():\n return True\n else:\n return False", "def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True", "def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])", "def check_optimization_sanity(self):\n if len(self.parameters) == 0:\n msg = \"No parameters defined. Optimization not possible.\"\n raise ValueError(msg)\n\n if len(self.constraints) == 0:\n msg = \"No constraints defined. Optimization not possible.\"\n raise ValueError(msg)" ]
[ "0.6974493", "0.6576027", "0.6430475", "0.6387279", "0.63408345", "0.63387513", "0.6308674", "0.62025046", "0.6115721", "0.6056194", "0.60385", "0.6000666", "0.59949714", "0.594604", "0.593078", "0.59297556", "0.5917851", "0.5897417", "0.5875833", "0.58327293", "0.58318603", "0.5811984", "0.58111763", "0.58044493", "0.58036155", "0.57944465", "0.5776491", "0.5763019", "0.5750451", "0.57502764", "0.57356703", "0.57356703", "0.57356703", "0.5727833", "0.5726054", "0.57056576", "0.5683159", "0.5671648", "0.567132", "0.5662628", "0.5655017", "0.5641789", "0.5630695", "0.56268334", "0.5612544", "0.5612139", "0.56069183", "0.5594508", "0.55926025", "0.5575395", "0.55631286", "0.55607754", "0.5552447", "0.5544992", "0.5540979", "0.55096793", "0.5502765", "0.5499776", "0.5499375", "0.54919976", "0.54880244", "0.5480834", "0.5474696", "0.547206", "0.54699785", "0.5464335", "0.5456408", "0.545597", "0.5451472", "0.54503936", "0.5448102", "0.54424727", "0.54269284", "0.54219043", "0.5418544", "0.5417026", "0.54103345", "0.540756", "0.54040647", "0.540311", "0.5402722", "0.5393647", "0.538777", "0.53840053", "0.53801626", "0.5377869", "0.537615", "0.537324", "0.5372593", "0.5368141", "0.53666455", "0.5366538", "0.53601557", "0.53546005", "0.5351563", "0.5350301", "0.53442717", "0.534381", "0.5338009", "0.53336525" ]
0.65248317
2
all_mapshapes = ['planar','toroid','cylinder'] all_lattices = ['hexa','rect']
def set_topology(self, mapsize=None, mapshape='planar', lattice='rect', mask=None, compname=None):
    self.mapshape = mapshape
    self.lattice = lattice

    # to set mask
    if mask is None:
        self.mask = np.ones([1, self.dim])
    else:
        self.mask = mask

    # to set map size
    if mapsize is None:
        tmp = int(round(np.sqrt(self.dlen)))
        self.nnodes = tmp
        self.mapsize = [int(3. / 5 * self.nnodes), int(2. / 5 * self.nnodes)]
    else:
        if len(mapsize) == 2:
            if np.min(mapsize) == 1:
                self.mapsize = [1, np.max(mapsize)]
            else:
                self.mapsize = mapsize
        elif len(mapsize) == 1:
            self.mapsize = [1, mapsize[0]]
            print('input was considered as the number of nodes')
            print('map size is [{0},{1}]'.format(self.mapsize[0], self.mapsize[1]))
    self.nnodes = self.mapsize[0] * self.mapsize[1]

    # to set component names
    if compname is None:
        try:
            cc = list()
            for i in range(0, self.dim):
                cc.append('Variable-' + str(i + 1))
            self.compname = np.asarray(cc)[np.newaxis, :]
        except:
            print('no data yet: please first set training data to the SOM')
    else:
        try:
            dim = getattr(self, 'dim')
            if len(compname) == dim:
                self.compname = np.asarray(compname)[np.newaxis, :]
            else:
                print('compname should have the same size')
        except:
            print('no data yet: please first set training data to the SOM')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generatePolygons():", "def world_map_template():\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.plot(\n laea_x(np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-np.pi, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(-2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(-2 * np.pi / 3, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(0, np.linspace(-np.pi / 2, np.pi / 2)),\n laea_y(0, np.linspace(-np.pi / 2, np.pi / 2)),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), 0),\n laea_y(np.linspace(-np.pi, np.pi), 0),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), np.pi / 6),\n laea_y(np.linspace(-np.pi, np.pi), np.pi / 6),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), -np.pi / 6),\n laea_y(np.linspace(-np.pi, np.pi), -np.pi / 6),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), np.pi / 3),\n laea_y(np.linspace(-np.pi, np.pi), np.pi / 3),\n color=\"k\",\n zorder=10,\n )\n ax.plot(\n laea_x(np.linspace(-np.pi, np.pi), -np.pi / 3),\n laea_y(np.linspace(-np.pi, np.pi), -np.pi / 3),\n color=\"k\",\n zorder=10,\n )\n ax.text(0, 1.47, r\"$|0\\rangle$\", fontsize=20)\n ax.text(0, -1.53, r\"$|1\\rangle$\", fontsize=20)\n ax.text(0.05, 0.05, r\"$|+\\rangle$\", fontsize=20)\n ax.text(2.9, 0, r\"$|-\\rangle$\", fontsize=20)\n ax.text(-3.2, 0, r\"$|-\\rangle$\", fontsize=20)\n\n return fig, ax", "def __init__(self, lower_left_corner, upper_right_corner, projection='merc'):\n self.basemap = MLMap.__create_basemap(lower_left_corner[0], lower_left_corner[1], upper_right_corner[0],\n upper_right_corner[1], projection=projection)\n self.shapes = pd.DataFrame()\n self.shapes_to_draw = []\n llc = self.basemap(lower_left_corner[0], lower_left_corner[1])\n urc = self.basemap(upper_right_corner[0], upper_right_corner[1])\n\n #self.bbox = Bbox([llc, urc])\n self.bbox = (lower_left_corner[0], lower_left_corner[1], upper_right_corner[0], upper_right_corner[1])", "def make_map(data,LatLonBox):\n\n proj = ccrs.LambertConformal(central_longitude=data.StationLongitude,\n central_latitude=data.StationLatitude)\n\n fig = plt.figure(figsize=(17,11))\n ax = plt.subplot(111,projection=proj)\n \n ax.coastlines('50m', 'black', linewidth=2, zorder=2)\n\n reader = shpreader.Reader('/Users/chowdahead/Documents/shapefiles/countyl010g_shp_nt00964/countyl010g.shp')\n counties = list(reader.geometries())\n COUNTIES = cfeature.ShapelyFeature(counties,ccrs.PlateCarree())\n ax.add_feature(COUNTIES, facecolor='none',edgecolor='w')\n # Grab state borders\n state_borders = cfeature.NaturalEarthFeature(\n category='cultural', name='admin_1_states_provinces_lines',\n scale='50m', facecolor='none')\n ax.add_feature(state_borders, edgecolor='w', linewidth=1, zorder=3)\n \n ocean = 
cfeature.NaturalEarthFeature('physical', 'ocean', scale='50m',\n edgecolor='face',\n facecolor=cfeature.COLORS['water'])\n land = cfeature.NaturalEarthFeature('physical', 'land', scale='50m',\n edgecolor='face',\n facecolor=\"k\")\n\n ax.add_feature(ocean, zorder=-1)\n ax.add_feature(land, zorder=-1)\n ax.set_facecolor('black')\n \n ax.set_extent(LatLonBox,ccrs.PlateCarree())\n \n return fig,ax,proj", "def generate_all_locations(grid, shape):", "def visualizemap(dna, map_view=\"linear\", feature_list=None, start=0, end=None,label_location=None, display_label=2, display_title=True, display_axis=True, fontsize=None, fontsize_nucl=None, \n tick_interval=\"auto\", labelcolor=\"k\", title=None, width_scale=\"auto\", height_scale=1.0, linebreak=None, seq=False, rcseq=False, diamater_scale=1.0, fig= None):\n\n if fontsize is None and map_view == \"linear\":\n fontsize = 12\n elif fontsize is None and map_view == \"circular\":\n fontsize = 10\n else:\n pass \n\n if title is None or title == \"\":\n display_titlee = False\n\n #if map_view == \"circular\":\n #feature_list.sort(key=lambda x:len(dna.printsequence(x.start, x.end)))\n \n standard_scale = 4000\n if map_view == \"circular\":\n figo, ax1, ax2= vc.visualize(dna, format=0, feature_list=feature_list, bottom=400 * diamater_scale, label_visible=display_label, fontsize=fontsize, \n title_visible=display_title, axis_visible=display_axis, tick_space=tick_interval, labelcolor=labelcolor, \n titlename=title, fig=fig)\n try:\n import patchworklib \n _patchworklib = True\n except:\n _patchworklib = False\n \n if _patchworklib == True:\n ax1 = patchworklib.cBrick(ax=ax1)\n ax2 = patchworklib.Brick(ax=ax2)\n if fig == patchworklib.Brick._figure or fig is None:\n return patchworklib.Bricks({ax1.get_label():ax1, ax2.get_label():ax2}) \n else:\n return figo\n else:\n return figo\n else:\n if feature_list is None:\n feature_list = dna.dnafeatures\n figo, ax = vl.visualize(dna, start=start, end=end, feature_list=feature_list, wrap_width=linebreak, annotation_loc=label_location, unvisible_types=[\"source\"], \n visible_types=[], enlarge_w=width_scale, enlarge_h=height_scale, fontsize=fontsize, fontsize_nucl=fontsize_nucl, with_seq=seq, with_seqr=rcseq, nucl_char=None, nucl_color_dict=None, \n label_visible=display_label, scale=\"fix\", title_visible=display_title, axis_visible=display_axis, tick_space=tick_interval, \n labelcolor=labelcolor, titlename=title, fig=fig)\n try:\n import patchworklib \n _patchworklib = True\n except:\n _patchworklib = False\n \n if _patchworklib == True:\n if fig == patchworklib.Brick._figure or fig is None:\n return ax\n else:\n return figo \n else:\n return figo", "def test_render_world_map():\n gdpinfo = gdpinfo_dict()\n codeinfo = codeinfo_dict()\n pygal_countries = pygal_country_dict()\n\n # 1960\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1960\", \"isp_gdp_world_code_1960.svg\")\n\n # 1980\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1980\", \"isp_gdp_world_code_1980.svg\")\n\n # 2000\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2000\", \"isp_gdp_world_code_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2010\", \"isp_gdp_world_code_2010.svg\")", "def maps(offices, fixed):\n with Image(filename=BAT_B) as page, Drawing() as draw:\n for office, x, y in MAP_POSITIONS:\n label = door_label(offices[office], logo=False)\n if label:\n draw.composite(\"over\", x, y, label.width / 3, label.height / 3, label)\n draw(page)\n page.save(filename=\"generated_map%s.png\" 
% (\"_fixed\" if fixed else \"\"))", "def _generate(self, feature_map_shape_list, **params):\n pass", "def demo_one_map():\n radii = [2, 5, 7.1, 0.3, 10]\n demo_1(radii)\n demo_2(radii)", "def _get_basemap(self, projection='lambert', geopolygons=None, resolution='i', bound=True, hillshade=False):\n\t\t# fig=plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')\n\t\tminlat = self.attrs['minlat']\n\t\tmaxlat = self.attrs['maxlat']\n\t\tminlon = self.attrs['minlon']\n\t\tmaxlon = self.attrs['maxlon']\n\t\tlat_centre = (maxlat+minlat)/2.0\n\t\tlon_centre = (maxlon+minlon)/2.0\n\t\tif projection=='merc':\n\t\t\tm=Basemap(projection='merc', llcrnrlat=minlat-5., urcrnrlat=maxlat+5., llcrnrlon=minlon-5.,\n\t\t\t\t\t urcrnrlon=maxlon+5., lat_ts=20, resolution=resolution)\n\t\t\t# m.drawparallels(np.arange(minlat,maxlat,dlat), labels=[1,0,0,1])\n\t\t\t# m.drawmeridians(np.arange(minlon,maxlon,dlon), labels=[1,0,0,1])\n\t\t\tm.drawparallels(np.arange(-80.0,80.0,2.0), dashes=[2,2], labels=[1,0,0,0], fontsize=12)\n\t\t\tm.drawmeridians(np.arange(-170.0,170.0,2.0), dashes=[2,2], labels=[0,0,1,0], fontsize=12)\n\t\t\tm.drawstates(color='g', linewidth=2.)\n\t\telif projection=='global':\n\t\t\tm=Basemap(projection='ortho',lon_0=lon_centre, lat_0=lat_centre, resolution=resolution)\n\t\t\t# m.drawparallels(np.arange(-80.0,80.0,10.0), labels=[1,0,0,1])\n\t\t\t# m.drawmeridians(np.arange(-170.0,170.0,10.0), labels=[1,0,0,1])\n\t\telif projection=='regional_ortho':\n\t\t\tm1 = Basemap(projection='ortho', lon_0=minlon, lat_0=minlat, resolution='l')\n\t\t\tm = Basemap(projection='ortho', lon_0=minlon, lat_0=minlat, resolution=resolution,\\\n\t\t\t\tllcrnrx=0., llcrnry=0., urcrnrx=m1.urcrnrx/mapfactor, urcrnry=m1.urcrnry/3.5)\n\t\t\tm.drawparallels(np.arange(-80.0,80.0,10.0), labels=[1,0,0,0], linewidth=2, fontsize=20)\n\t\t\t# m.drawparallels(np.arange(-90.0,90.0,30.0),labels=[1,0,0,0], dashes=[10, 5], linewidth=2, fontsize=20)\n\t\t\t# m.drawmeridians(np.arange(10,180.0,30.0), dashes=[10, 5], linewidth=2)\n\t\t\tm.drawmeridians(np.arange(-170.0,170.0,10.0), linewidth=2)\n\t\telif projection=='lambert':\n\t\t\tdistEW, az, baz=obspy.geodetics.gps2dist_azimuth(minlat, minlon, minlat, maxlon) # distance is in m\n\t\t\tdistNS, az, baz=obspy.geodetics.gps2dist_azimuth(minlat, minlon, maxlat+2., minlon) # distance is in m\n\t\t\tm = Basemap(width=distEW, height=distNS, rsphere=(6378137.00,6356752.3142), resolution='l', projection='lcc',\\\n\t\t\t\tlat_1=minlat, lat_2=maxlat, lon_0=lon_centre, lat_0=lat_centre)\n\t\t\tm.drawparallels(np.arange(-80.0,80.0,2.0), linewidth=1, dashes=[2,2], labels=[1,0,0,0], fontsize=12)\n\t\t\tm.drawmeridians(np.arange(-170.0,170.0,2.0), linewidth=1, dashes=[2,2], labels=[0,0,1,0], fontsize=12)\n\t\t\t# m.drawparallels(np.arange(-80.0,80.0,10.0), linewidth=0.5, dashes=[2,2], labels=[1,0,0,0], fontsize=5)\n\t\t\t# m.drawmeridians(np.arange(-170.0,170.0,10.0), linewidth=0.5, dashes=[2,2], labels=[0,0,0,1], fontsize=5)\n\t\tm.drawcoastlines(linewidth=1.0)\n\t\tm.drawcountries(linewidth=1.0)\n\t\tm.drawstates(linewidth=1.0)\n\t\t# m.drawmapboundary(fill_color=[1.0,1.0,1.0])\n\t\t# m.fillcontinents(lake_color='#99ffff',zorder=0.2)\n\t\t# m.drawlsmask(land_color='0.8', ocean_color='#99ffff')\n\t\tm.drawmapboundary(fill_color=\"white\")\n\t\tif bound:\n\t\t\ttry:\n\t\t\t\t# m.readshapefile('/projects/howa1663/Code/ToolKit/Models/Plates/PB2002_boundaries', name='PB2002_boundaries', drawbounds=True, linewidth=1, color='orange') # draw plate boundary on 
basemap\n\t\t\t\t#m.readshapefile('/work3/wang/code_bkup/AgeJdF/Plates/PB2002_boundaries', name='PB2002_boundaries', drawbounds=True, \\\n\t\t\t\t#\t\tlinewidth=1, color='orange')\n\t\t\t\tm.readshapefile('/work3/wang/code_bkup/ToolKit/Models/UT_Plates/ridge',name='ridge',drawbounds=True, linewidth=1, color='orange')\n\t\t\t\tm.readshapefile('/work3/wang/code_bkup/ToolKit/Models/UT_Plates/trench',name='trench',drawbounds=True, linewidth=1, color='orange')\n\t\t\t\tm.readshapefile('/work3/wang/code_bkup/ToolKit/Models/UT_Plates/transform',name='transform',drawbounds=True, linewidth=1, color='orange')\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't read shape file! Continue without drawing plateboundaries\")\n\t\ttry:\n\t\t\tgeopolygons.PlotPolygon(inbasemap=m)\n\t\texcept:\n\t\t\tpass\n\t\tif hillshade:\n\t\t\tfrom netCDF4 import Dataset\n\t\t\tfrom matplotlib.colors import LightSource\n\t\t\tetopo1 = Dataset('/work2/wang/Code/ToolKit/ETOPO1_Ice_g_gmt4.grd','r')\n\t\t\tzz = etopo1.variables[\"z\"][:]\n\t\t\tllons = etopo1.variables[\"x\"][:]\n\t\t\twest = llons<0 # mask array with negetive longitudes\n\t\t\twest = 360.*west*np.ones(len(llons))\n\t\t\tllons = llons+west\n\t\t\tllats = etopo1.variables[\"y\"][:]\n\t\t\tetopoz = zz[(llats>(minlat-2))*(llats<(maxlat+2)), :]\n\t\t\tetopoz = etopoz[:, (llons>(minlon-2))*(llons<(maxlon+2))]\n\t\t\tllats = llats[(llats>(minlat-2))*(llats<(maxlat+2))]\n\t\t\tllons = llons[(llons>(minlon-2))*(llons<(maxlon+2))]\n\t\t\tls = LightSource(azdeg=315, altdeg=45)\n\t\t\tetopoZ = m.transform_scalar(etopoz, llons-360*(llons>180)*np.ones(len(llons)), llats, etopoz.shape[0], etopoz.shape[1])\n\t\t\tls = LightSource(azdeg=315, altdeg=45)\n\t\t\tm.imshow(ls.hillshade(etopoZ, vert_exag=1.),cmap='gray')\n\t\treturn m", "def __init__(self, *args):\n _XCAFDoc.XCAFDoc_ShapeMapTool_swiginit(self,_XCAFDoc.new_XCAFDoc_ShapeMapTool(*args))", "def XCAFDoc_ShapeMapTool_Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_Set(*args)", "def ccs4_map(cfg_set_tds,figsize_x=12,figsize_y=12,hillshade=True,radar_loc=True,radar_vis=True):\r\n \r\n ## Load DEM and Swiss borders\r\n shp_path_CH = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/swissBOUNDARIES3D_1_3_TLM_LANDESGEBIET.shp\")\r\n shp_path_Kantone = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp\")\r\n shp_path_count = os.path.join(cfg_set_tds[\"root_path\"],u\"data/shapefile/CCS4_merged_proj_clip_G05_countries.shp\")\r\n dem_path = os.path.join(cfg_set_tds[\"root_path\"],u\"data/DEM/ccs4.png\")\r\n visi_path = os.path.join(cfg_set_tds[\"root_path\"],u\"data/radar/radar_composite_visibility.npy\")\r\n\r\n dem = Image.open(dem_path)\r\n dem = np.array(dem.convert('P'))\r\n\r\n sf_CH = shapefile.Reader(shp_path_CH)\r\n sf_KT = shapefile.Reader(shp_path_Kantone)\r\n sf_ct = shapefile.Reader(shp_path_count)\r\n\r\n ## Setup figure\r\n fig_extent = (255000,965000,-160000,480000)\r\n fig, axes = plt.subplots(1, 1)\r\n fig.set_size_inches(figsize_x, figsize_y)\r\n \r\n ## Plot altitude / hillshading\r\n if hillshade:\r\n ls = colors.LightSource(azdeg=315, altdeg=45)\r\n axes.imshow(ls.hillshade(-dem, vert_exag=0.05),\r\n extent=fig_extent, cmap='gray', alpha=0.5)\r\n else:\r\n axes.imshow(dem*0.6, extent=fig_extent, cmap='gray', alpha=0.5)\r\n \r\n ## Get borders of Cantons\r\n try:\r\n shapes_KT = sf_KT.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for KT_i, shape in 
enumerate(shapes_KT):\r\n x = np.array([i[0] for i in shape.points[:]])\r\n y = np.array([i[1] for i in shape.points[:]])\r\n endpoint = np.where(x==x[0])[0][1]\r\n x = x[:endpoint]\r\n y = y[:endpoint]\r\n axes.plot(x,y,color='darkred',linewidth=0.5,zorder=5)\r\n\r\n ## Get borders of neighbouring countries\r\n try:\r\n shapes_ct = sf_ct.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for ct_i, shape in enumerate(shapes_ct):\r\n if ct_i in [0,1]:\r\n continue\r\n x = np.array([i[0] for i in shape.points[:]])\r\n y = np.array([i[1] for i in shape.points[:]])\r\n x[x<=255000] = 245000\r\n x[x>=965000] = 975000\r\n y[y<=-159000] = -170000\r\n y[y>=480000] = 490000\r\n if ct_i in [3]:\r\n axes.plot(x[20:170],y[20:170],color='black',linewidth=0.5)\r\n if ct_i in [2]:\r\n ## Delete common border of FR and CH:\r\n x_south = x[y<=86000]; y_south = y[y<=86000]\r\n x_north = x[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]\r\n #x_north = x[np.logical_and(y>=270577,y<=491000)]\r\n y_north = y[np.logical_and(np.logical_and(y>=270577,y<=491000),x>510444)]\r\n #y_north = y[np.logical_and(y>=270577,y<=491000)]\r\n axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north,y_north,color='black',linewidth=0.5,zorder=4)\r\n if ct_i in [4]:\r\n ## Delete common border of AT and CH:\r\n x_south = x[np.logical_and(x>=831155,y<235000)]\r\n y_south = y[np.logical_and(x>=831155,y<235000)]\r\n #x_north1 = x[np.logical_and(x>=756622,y>=260466)]\r\n x_north1 = x[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]\r\n #y_north1 = y[np.logical_and(x>=756622,y>=260466)]\r\n y_north1 = y[np.logical_and(np.logical_and(x>=758622,y>=262466),x<=794261)]\r\n y_north2 = y[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]\r\n x_north2 = x[np.logical_and(np.logical_and(x>=774261,y>=229333),x<=967000)]\r\n y_north2 = np.concatenate([y_north2[np.argmin(x_north2):],y_north2[:np.argmin(x_north2)]])\r\n x_north2 = np.concatenate([x_north2[np.argmin(x_north2):],x_north2[:np.argmin(x_north2)]])\r\n x_LI = x[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]\r\n y_LI = y[np.logical_and(np.logical_and(x<=773555,y>=214400),y<=238555)]\r\n axes.plot(x_south,y_south,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north1,y_north1,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_north2,y_north2,color='black',linewidth=0.5,zorder=4)\r\n axes.plot(x_LI,y_LI,color='black',linewidth=0.5,zorder=4)\r\n else:\r\n continue\r\n #axes.plot(x,y,color='black',linewidth=1,zorder=4)\r\n\r\n ## Get Swiss borders\r\n try:\r\n #shp_records = sf_CH.shapeRecords()\r\n shapes_CH = sf_CH.shapes()\r\n except UnicodeDecodeError:\r\n print(\" *** Warning: No country shape plotted (UnicodeDecodeErrror)\")\r\n else:\r\n for ct_i, shape in enumerate(shapes_CH): #sf_CH.shapeRecords():\r\n if ct_i!=0: continue\r\n x = np.array([i[0]-2000000 for i in shape.points[:]])\r\n y = np.array([i[1]-1000000 for i in shape.points[:]])\r\n endpoint = np.where(x==x[0])[0][1]\r\n x = x[:endpoint]\r\n y = y[:endpoint]\r\n \r\n ## Convert to swiss coordinates\r\n #x,y = lonlat2xy(lon, lat)\r\n axes.plot(x,y,color='darkred',linewidth=1,zorder=3)\r\n\r\n ## Add weather radar locations:\r\n if radar_loc:\r\n weather_radar_y = [237000,142000,100000,135000,190000]\r\n weather_radar_x = [681000,497000,708000,604000,780000]\r\n axes.scatter(weather_radar_x,weather_radar_y,marker=\"D\",#s=2,\r\n 
color='orange',edgecolor='black',zorder=10)\r\n \r\n ## Add radar visibility:\r\n if radar_vis:\r\n arr_visi = np.load(visi_path)\r\n arr_visi[arr_visi<9000] = 0\r\n arr_visi2 = morph.binary_opening(morph.binary_erosion(arr_visi, structure=np.ones((4,4))), structure=np.ones((4,4)))\r\n arr_visi[arr_visi<9000] = np.nan\r\n axes.imshow(arr_visi, cmap=\"gray\", alpha=0.2, extent=fig_extent)\r\n arr_visi[np.isnan(arr_visi)] = 1\r\n #axes.contour(arr_visi[::-1,:], levels=[2], cmap=\"gray\", linewidths=2,\r\n # linestyle=\"solid\", alpha=0.5, extent=fig_extent)\r\n #arr_visi = arr_visi[::4, ::4]\r\n #ys, xs = np.mgrid[arr_visi.shape[0]:0:-1,\r\n # 0:arr_visi.shape[1]]\r\n #axes.scatter(xs.flatten(), ys.flatten(), s=4,\r\n # c=arr_visi.flatten().reshape(-1, 3), edgecolor='face')\r\n \r\n ## Add further elements:\r\n axes.set_xlim([255000,965000])\r\n axes.set_ylim([-160000,480000])\r\n axes.grid()\r\n axes.set_ylabel(\"CH1903 Northing\")\r\n axes.set_xlabel(\"CH1903 Easting\")\r\n axes.get_xaxis().set_major_formatter( \\\r\n ticker.FuncFormatter(lambda x, p: format(int(x), \",\").replace(',', \"'\")))\r\n axes.get_yaxis().set_major_formatter( \\\r\n ticker.FuncFormatter(lambda x, p: format(int(x), \",\").replace(',', \"'\")))\r\n plt.yticks(rotation=90, verticalalignment=\"center\")\r\n return fig, axes, fig_extent", "def map_area( m ):\n \n \n m.drawcoastlines( linewidth = 1.5, linestyle = 'solid', color = [ 75./255., 75/255., 75/255. ] )\t\n # ------draw parallels----------------\n circles = np.arange( -90., 90. + 30, 30. ) #delat = 30.\n m.drawparallels( circles, labels = [ 1, 0, 0, 0 ] )\n \n # -------draw meridians---------------\n meridians = np.arange( 0., 360, 60. ) #delon = 60.\n m.drawmeridians( meridians, labels = [ 0, 0, 0, 1 ] )", "def __init__(self):\n self.lattices = []\n self.meshfns = []", "def draw_composite_map(date_obj, t850, u200, v200, u500, v500, mslp, gh500, u850, v850, pwat):\n \n #Get lat and lon arrays for this dataset:\n lat = t850.lat.values\n lon = t850.lon.values\n\n #========================================================================================================\n # Create a Basemap plotting figure and add geography\n #========================================================================================================\n\n #Create a Plate Carree projection object\n proj_ccrs = ccrs.Miller(central_longitude=0.0)\n\n #Create figure and axes for main plot and colorbars\n fig = plt.figure(figsize=(18,12),dpi=125)\n gs = gridspec.GridSpec(12, 36, figure=fig) #[ytop:ybot, xleft:xright]\n ax = plt.subplot(gs[:, :-1],projection=proj_ccrs) #main plot\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax2 = plt.subplot(gs[:4, -1]) #top plot\n ax2.set_xticklabels([])\n ax2.set_yticklabels([])\n ax3 = plt.subplot(gs[4:8, -1]) #bottom plot\n ax3.set_xticklabels([])\n ax3.set_yticklabels([])\n ax4 = plt.subplot(gs[8:, -1]) #bottom plot\n ax4.set_xticklabels([])\n ax4.set_yticklabels([])\n\n #Add political boundaries and coastlines\n ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.BORDERS.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.STATES.with_scale('50m'), linewidths=0.5)\n\n #Add land/lake/ocean masking\n land_mask = cfeature.NaturalEarthFeature('physical', 'land', '50m',\n edgecolor='face', facecolor='#e6e6e6')\n sea_mask = cfeature.NaturalEarthFeature('physical', 'ocean', '50m',\n edgecolor='face', facecolor='#ffffff')\n lake_mask = cfeature.NaturalEarthFeature('physical', 'lakes', '50m',\n 
edgecolor='face', facecolor='#ffffff')\n ax.add_feature(sea_mask,zorder=0)\n ax.add_feature(land_mask,zorder=0)\n ax.add_feature(lake_mask,zorder=0)\n\n #========================================================================================================\n # Fill contours\n #========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # 850-hPa temperature\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(-40,40,1)\n cmap = plt.get_cmap('jet')\n extend = \"both\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,t850,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.1)\n\n #--------------------------------------------------------------------------------------------------------\n # PWAT\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(20,71,0.5)\n\n #Define a color gradient for PWAT\n pwat_colors = gradient([[(255,255,255),0.0],[(255,255,255),20.0]],\n [[(205,255,205),20.0],[(0,255,0),34.0]],\n [[(0,255,0),34.0],[(0,115,0),67.0]])\n cmap = pwat_colors.get_cmap(clevs)\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,pwat,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.9)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax2,shrink=0.75,pad=0.01,ticks=[20,30,40,50,60,70])\n\n #--------------------------------------------------------------------------------------------------------\n # 250-hPa wind\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n wind = calc.wind_speed(u200, v200)\n\n #Specify contour settings\n clevs = [40,50,60,70,80,90,100,110]\n cmap = col.ListedColormap(['#99E3FB','#47B6FB','#0F77F7','#AC97F5','#A267F4','#9126F5','#E118F3','#E118F3'])\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,wind,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax3,shrink=0.75,pad=0.01,ticks=clevs)\n\n #--------------------------------------------------------------------------------------------------------\n # 500-hPa smoothed vorticity\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n dx,dy = calc.lat_lon_grid_deltas(lon,lat)\n vort = calc.vorticity(u500, v500, dx=dx, dy=dy)\n smooth_vort = smooth(vort, 5.0) * 10**5\n\n #Specify contour settings\n clevs = np.arange(2,20,1)\n cmap = plt.get_cmap('autumn_r')\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,smooth_vort,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.3)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax4,shrink=0.75,pad=0.01,ticks=clevs[::2])\n \n #========================================================================================================\n # Contours\n #========================================================================================================\n\n 
#--------------------------------------------------------------------------------------------------------\n # MSLP\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(960,1040+4,4)\n style = 'solid' #Plot solid lines\n color = 'red' #Plot lines as gray\n width = 0.8 #Width of contours 0.25\n\n #Contour this variable\n cs = ax.contour(lon,lat,mslp,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs,alpha=0.9)\n\n #Include value labels\n ax.clabel(cs, inline=1, fontsize=9, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Geopotential heights\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n gh500 = gh500 / 10.0\n\n #Specify contour settings\n clevs = np.arange(480,612,4)\n style = 'solid' #Plot solid lines\n color = 'black' #Plot lines as gray\n width = 2.0 #Width of contours\n\n #Contour this variable\n cs = ax.contour(lon,lat,gh500,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs)\n\n #Include value labels\n ax.clabel(cs, inline=1, fontsize=12, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Surface barbs\n #--------------------------------------------------------------------------------------------------------\n\n #Plot wind barbs\n _ = ax.quiver(lon, lat, u850.values, v850.values, transform=proj_ccrs, regrid_shape=(38,30), scale=820, alpha=0.5)\n\n #--------------------------------------------------------------------------------------------------------\n # Label highs & lows\n #--------------------------------------------------------------------------------------------------------\n\n #Label highs and lows\n add_mslp_label(ax, proj_ccrs, mslp, lat, lon)\n\n #========================================================================================================\n # Step 6. 
Add map boundary, legend, plot title, then save image and close\n #========================================================================================================\n\n #Add china province boundary\n add_china_map_2cartopy(ax, name='province')\n\n #Add custom legend\n from matplotlib.lines import Line2D\n custom_lines = [Line2D([0], [0], color='#00A123', lw=5),\n Line2D([0], [0], color='#0F77F7', lw=5),\n Line2D([0], [0], color='#FFC000', lw=5),\n Line2D([0], [0], color='k', lw=2),\n Line2D([0], [0], color='k', lw=0.1, marker=r'$\\rightarrow$', ms=20),\n Line2D([0], [0], color='r', lw=0.8),]\n\n ax.legend(custom_lines, ['PWAT (mm)', '200-hPa Wind (m/s)', '500-hPa Vorticity', '500-hPa Height (dam)', '850-hPa Wind (m/s)', 'MSLP (hPa)'], loc=2, prop={'size':12})\n\n #Format plot title\n title = \"Synoptic Composite \\nValid: \" + dt.datetime.strftime(date_obj,'%Y-%m-%d %H%M UTC')\n st = plt.suptitle(title,fontweight='bold',fontsize=16)\n st.set_y(0.92)\n\n #Return figuration\n return(fig)", "def construct_locations(path_to_shapes, path_to_land_eligibility_km2, path_to_hydro_capacities_mw,\n path_to_biofuel_potential_mwh, flat_roof_share, maximum_installable_power_density,\n scaling_factors, biofuel_efficiency, path_to_output_yaml, path_to_output_csv):\n locations = gpd.GeoDataFrame(\n gpd.read_file(path_to_shapes).set_index(\"id\")\n )\n locations = (\n locations\n .assign(centroid=locations.centroid.rename(\"centroid\"))\n .loc[:, [\"name\", \"centroid\"]]\n )\n capacities = _from_area_to_installed_capacity(\n land_eligibiligy_km2=pd.read_csv(path_to_land_eligibility_km2, index_col=0),\n flat_roof_share=flat_roof_share,\n maximum_installable_power_density=maximum_installable_power_density\n )\n hydro_capacities = pd.read_csv(path_to_hydro_capacities_mw, index_col=0)\n biofuel = pd.read_csv(path_to_biofuel_potential_mwh, index_col=0) * biofuel_efficiency\n locations = locations.merge(\n pd.concat([capacities, hydro_capacities, biofuel], axis=\"columns\", sort=True),\n how=\"left\",\n left_index=True,\n right_index=True,\n validate=\"one_to_one\"\n )\n locations = locations.assign(id=locations.index.str.replace(\".\", \"-\")).set_index(\"id\")\n\n env = jinja2.Environment()\n env.filters[\"unit\"] = filters.unit\n rendered = env.from_string(TEMPLATE).render(\n locations=locations,\n scaling_factors=scaling_factors\n )\n with open(path_to_output_yaml, \"w\") as result_file:\n result_file.write(rendered)\n locations.name.to_csv(path_to_output_csv, index=True, header=True)", "def draw_map(gdf_wrf_all, gdf_wrf_rw, gdf_rw):\n fig = plt.figure(figsize=(6, 6))\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.coastlines(resolution='10m')\n states_provinces = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n scale='10m',\n facecolor='none')\n ax.add_feature(states_provinces, edgecolor='black')\n ax.add_geometries(geoms=gdf_wrf_rw.geometry, crs=ccrs.Mercator.GOOGLE,\n facecolor='#1b9e77', edgecolor='black', alpha=0.5,\n label='Redwood range')\n ax.add_geometries(geoms=gdf_wrf_all.geometry, crs=ccrs.Mercator.GOOGLE,\n facecolor='white', edgecolor='black', alpha=0.3,\n\n label='WRF cells with redwoods')\n ax.add_geometries(geoms=gdf_rw.geometry, crs=ccrs.Mercator.GOOGLE,\n facecolor='#d95f02', edgecolor='black', alpha=0.5,\n label='WRF cells with redwoods')\n bounds = gdf_wrf.total_bounds\n ax.set_extent(bounds[(0, 2, 1, 3), ], crs=ccrs.Mercator.GOOGLE)\n\n # make proxy artists for a legend\n redwoods = mpatches.Rectangle((0, 0), 1, 1, 
facecolor='#d95f02')\n wrf_cells = mpatches.Rectangle((0, 0), 1, 1, facecolor='white',\n edgecolor='black', alpha=0.3)\n wrf_redwoods = mpatches.Rectangle((0, 0), 1, 1, facecolor='#1b9e77',\n edgecolor='black', alpha=0.5)\n labels = ['Redwoods range',\n 'WRF grid',\n 'WRF cells with redwoods']\n plt.legend([redwoods, wrf_cells, wrf_redwoods], labels,\n loc='lower left', bbox_to_anchor=(0.025, -0.1), fancybox=True)\n\n plt.show()", "def test_render_world_map():\n\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n \n# gdpinfo = {\n# \"gdpfile\": \"gdptable1.csv\",\n# \"separator\": \",\",\n# \"quote\": '\"',\n# \"min_year\": 2000,\n# \"max_year\": 2005,\n# \"country_name\": \"Country Name\",\n# \"country_code\": \"Code\"\n# }\n\n \n # Get pygal country code map\n pygal_countries = pygal.maps.world.COUNTRIES\n \n #test\n #render_world_map(gdpinfo, pygal_countries, \"2002\", \"isp_gdp_world_name_1960.svg\")\n\n # 1960\n render_world_map(gdpinfo, pygal_countries, \"1960\", \"isp_gdp_world_name_1960.svg\")\n\n # 1980\n #render_world_map(gdpinfo, pygal_countries, \"1980\", \"isp_gdp_world_name_1980.svg\")\n\n # 2000\n #render_world_map(gdpinfo, pygal_countries, \"2000\", \"isp_gdp_world_name_2000.svg\")\n\n # 2010\n #render_world_map(gdpinfo, pygal_countries, \"2010\", \"isp_gdp_world_name_2010.svg\")", "def standard_map_peninsula():\n geogr = \"\"\"\\\n OOOOOOOOOOOOOOOOOOOOO\n OOOOOOOOSMMMMJJJJJJJO\n OSSSSSJJJJMMJJJJJJJOO\n OSSSSSSSSSMMJJJJJJOOO\n OSSSSSJJJJJJJJJJJJOOO\n OSSSSSJJJDDJJJSJJJOOO\n OSSJJJJJDDDJJJSSSSOOO\n OOSSSSJJJDDJJJSOOOOOO\n OSSSJJJJJDDJJJJJJJOOO\n OSSSSJJJJDDJJJJOOOOOO\n OOSSSSJJJJJJJJOOOOOOO\n OOOSSSSJJJJJJJOOOOOOO\n OOOOOOOOOOOOOOOOOOOOO\"\"\"\n island = isle.Island(geogr)\n occupants = [{'loc': (1, 19),\n 'pop': [{'species': 'Herbivore', 'age': 9, 'weight': 10},\n {'species': 'Carnivore', 'age': 9, 'weight': 10}]}]\n island.populate_island(occupants)\n return island", "def load_maps(cat,maps=None):\n\n if maps is None:\n if cat.release=='y1':\n maps=np.array(list(config.map_name_y1.keys()))\n elif cat.release=='sv':\n maps=np.array(list(config.map_name_sv.keys()))\n print maps\n for i,x in enumerate(maps):\n print i,x\n if x=='ebv':\n setattr(cat,x,split_methods.get_maps(cat.ra,cat.dec,x,release=cat.release,nside=2048,map=True))\n else:\n setattr(cat,x,split_methods.get_maps(cat.ra,cat.dec,x,release=cat.release))\n\n return", "def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'", "def uk_map(fig1, indata, clevs, datlons, datlats, mtitle, munits, maskswitch):\n\t\n\tfrom mpl_toolkits import basemap as bm\n\timport matplotlib.cm as cm\n\tfrom mpl_toolkits.basemap import shiftgrid \n\tfrom netCDF4 import Dataset\n\tfrom matplotlib.colors import LightSource\n\timport matplotlib.pyplot as plt\n\timport numpy as np\n\timport hillshade\n\timport set_shade\n\timport colour_map\n\t\n\tif maskswitch==1:\n\t\t# import missing data map for masking out of oceans \n\t\tmissdata = Dataset('/exports/work/geos_cxc/users/ahardin4/output/amibatch/afixa/miss.nc', 'r', format='NETCDF3_CLASSIC')\n\t\t\n\t# create the figure and axes instances.\n\tax = 
fig1.add_axes([0.1,0.1,0.8,0.8])\n\tm = bm.Basemap(llcrnrlon=-9.5,llcrnrlat=49.5,urcrnrlon=2.5,urcrnrlat=59,rsphere=(6378137.00,6356752.3142),\\\n \tresolution='f',area_thresh=1000.,projection='laea', lat_0=54.5,lon_0=-2.75,ax=ax)\n\tm.drawcoastlines()\n\t\n\t# read in etopo5 topography/bathymetry.\n\turl = 'http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc'\n\tetopodata = Dataset(url)\n\ttopoin = etopodata.variables['ROSE'][:]\n\tlons = etopodata.variables['ETOPO05_X'][:]\n\tlats = etopodata.variables['ETOPO05_Y'][:]\n\t\n\t# shift data so lons go from -180 to 180 instead of 00 to 360.\n\ttopoin,lons = shiftgrid(180.,topoin,lons,start=False)\n\n\t# transform coordinates\n\tx,y=m(datlons[:,:],datlats[:,:])\n\t# transform to nx x ny regularly spaced 5km native projection grid\n\tnx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1\n\ttopodat = m.transform_scalar(topoin,lons,lats,nx,ny)\n\t\n\t# create light source object for topography\n\tls = LightSource(azdeg = 0, altdeg = 2)\n\t# use set_shade function (also available)\n\trgb = set_shade(topodat)\n\n\t# plot image over map with imshow.\n\tim = m.imshow(rgb)\n\t\n\t# apply function to colormap pointers, can be any function at all, as long as\n\t# 0 remains 0, 1 remains 1, and values increase from one to the other.\n\t\n\t# x^4 is good for pseudo-log plots of rainfall:\n\t#log_jet=cmap_xmap(lambda x: (x*x*x*x), cm.hsv)\n\t\n\t#set to lambda x: x for no change:\n\tlog_jet=cmap_xmap(lambda x: (x), cm.jet)\n\t\n\t#apply function to colormap if desired to make whole scale 'hotter' or 'colder'\n\t#example makes colourmap significantly hotter by confining values to upper quarter:\t\n\t#log_jet=cmap_map(lambda x: x/4+0.75, cm.gist_rainbow)\n\t\n\t# mask out oceans, but not lakes. 
Useful when plotting or comparing against observed\n\tif maskswitch==1:\n\t\tmissmap=missdata.variables['land_map']\n\t\tmissmap2=missdata.variables['land_map']\n\t\t# cut from big mask to small mask if necessary\n\t\t#smallmap=missmap[0,6:46,0:34]\n\t\tsmallmap=missmap[0,:,:]\n\t\tsmallmap2=missmap2[0,:,:]\n\t\t# expand out by one to take into account interpolation\n\t\t\n\t\tfor i in range(1,39):\n\t\t\tfor j in range(1,33):\n\t\t\t\tif smallmap[i,j] == 0.0:\n\t\t\t\t\tsmallmap2[i-1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j-1]=0.0\n\t\t\t\t\tsmallmap2[i+1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j+1]=0.0\n\t\t\n\t\t# perform masking\n\t\tindata=np.ma.masked_array(indata,mask=(smallmap2<-0.5))\n\t\tprint smallmap2[0,0], smallmap2[36,0], smallmap2[20,20]\n\t\t#indata[indata<=0.1]=np.nan\n\t# produce semi-transparent contour map\n\tcontourmap=m.contourf(x,y,indata,clevs,cmap=cm.get_cmap(log_jet,len(clevs)-1),extend='both',\n\t\talpha=0.5,origin='lower',rasterized=True)\n\t\t\n\t# produce simple block plot\n\t#contourmap=m.pcolor(x,y,indata,shading='interp',cmap=cm.get_cmap(log_jet,len(clevs)-1),\n\t#\talpha=0.5)\n\t\t\n\t# place colour bar on right\n\tcb = m.colorbar(contourmap,\"right\", size=\"5%\", pad='3%')\n\t# configure colour bar labeling\n\tcl = plt.getp(cb.ax, 'ymajorticklabels')\n\tcontourmap=plt.setp(cl, fontsize=14)\n\n\t# draw parallels and meridians so as not to clash with colour bar placement\n\t# labels = [left,right,top,bottom]\n\tm.drawparallels(np.arange(-70.,80,1.), labels=[1,0,0,1], fontsize=13)\n\tm.drawmeridians(np.arange(351.,362.,2.),labels=[1,0,0,1], fontsize=13)\n\t\n\t# configure title and units\n\tcb.ax.set_xlabel(munits, fontsize=12)\n\tcontourmap=plt.title(mtitle, fontsize=14)", "def build_compass_map():\n\n for i in range(0, 100):\n # Add bears\n if ENEMY_LIST[i] == 1:\n HAS_COMPASS_MAP.append(COMPASS_DICT[3])\n # Add Grizzly bear\n elif ENEMY_LIST[i] == 2:\n HAS_COMPASS_MAP.append(COMPASS_DICT[4])\n # Add water spots\n elif GROUND_FEATURES_LIST[i] == 10:\n HAS_COMPASS_MAP.append(COMPASS_DICT[1])\n # Add Big Trees\n elif GROUND_FEATURES_LIST[i] == 11:\n HAS_COMPASS_MAP.append(COMPASS_DICT[2])\n # Add nothings\n else:\n HAS_COMPASS_MAP.append(COMPASS_DICT[5])", "def set_locations():\n STATUS['locations']['monster'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['monster'][1] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][0] = generate_random_coord(STATUS['grid_size'])\n STATUS['locations']['weapon'][1] = generate_random_coord(STATUS['grid_size'])", "def _get_basemap(self, projection='lambert', geopolygons=None, resolution='i'):\n # fig=plt.figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')\n lat_centre = (self.maxlat+self.minlat)/2.0\n lon_centre = (self.maxlon+self.minlon)/2.0\n if projection=='merc':\n m=Basemap(projection='merc', llcrnrlat=self.minlat-5., urcrnrlat=self.maxlat+5., llcrnrlon=self.minlon-5.,\n urcrnrlon=self.maxlon+5., lat_ts=20, resolution=resolution)\n m.drawparallels(np.arange(-80.0,80.0,5.0), labels=[1,0,0,1])\n m.drawmeridians(np.arange(-170.0,170.0,5.0), labels=[1,0,0,1])\n m.drawstates(color='g', linewidth=2.)\n elif projection=='global':\n m=Basemap(projection='ortho',lon_0=lon_centre, lat_0=lat_centre, resolution=resolution)\n m.drawparallels(np.arange(-80.0,80.0,10.0), labels=[1,0,0,1])\n m.drawmeridians(np.arange(-170.0,170.0,10.0), labels=[1,0,0,1])\n elif projection=='regional_ortho':\n m1 = Basemap(projection='ortho', lon_0=self.minlon, lat_0=self.minlat, resolution='l')\n 
m = Basemap(projection='ortho', lon_0=self.minlon, lat_0=self.minlat, resolution=resolution,\\\n llcrnrx=0., llcrnry=0., urcrnrx=m1.urcrnrx/mapfactor, urcrnry=m1.urcrnry/3.5)\n m.drawparallels(np.arange(-80.0,80.0,10.0), labels=[1,0,0,0], linewidth=2, fontsize=20)\n m.drawmeridians(np.arange(-170.0,170.0,10.0), linewidth=2)\n elif projection=='lambert':\n distEW, az, baz=obspy.geodetics.gps2dist_azimuth(self.minlat, self.minlon,\n self.minlat, self.maxlon) # distance is in m\n distNS, az, baz=obspy.geodetics.gps2dist_azimuth(self.minlat, self.minlon,\n self.maxlat+2., self.minlon) # distance is in m\n m = Basemap(width=distEW, height=distNS, rsphere=(6378137.00,6356752.3142), resolution='l', projection='lcc',\\\n lat_1=self.minlat, lat_2=self.maxlat, lon_0=lon_centre, lat_0=lat_centre+1)\n m.drawparallels(np.arange(-80.0,80.0,10.0), linewidth=1, dashes=[2,2], labels=[1,1,0,0], fontsize=15)\n m.drawmeridians(np.arange(-170.0,170.0,10.0), linewidth=1, dashes=[2,2], labels=[0,0,1,0], fontsize=15)\n m.drawcoastlines(linewidth=1.0)\n m.drawcountries(linewidth=1.)\n m.drawstates()\n m.fillcontinents(lake_color='#99ffff',zorder=0.2)\n # m.drawlsmask(land_color='0.8', ocean_color='#99ffff')\n m.drawmapboundary(fill_color=\"white\")\n try: geopolygons.PlotPolygon(inbasemap=m)\n except: pass\n return m", "def data_shapes(self):", "def read_area_shapes(path_ew, path_s):\n output = []\n\n with fiona.open(path_ew, 'r') as reader:\n for lsoa in reader:\n output.append({\n 'type': lsoa['type'],\n 'geometry': lsoa['geometry'],\n 'properties': {\n 'code': lsoa['properties']['LSOA11CD'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n with fiona.open(path_s, 'r') as reader:\n for datazone in reader:\n output.append({\n 'type': datazone['type'],\n 'geometry': datazone['geometry'],\n 'properties': {\n 'code': datazone['properties']['DataZone'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n return output", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def test_map_basics(self):\n self.assertDigitizerMapBasics(self.map, self.dgroup)", "def test_render_world_map():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n codeinfo = {\n \"codefile\": \"isp_country_codes.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"plot_codes\": \"ISO3166-1-Alpha-2\",\n \"data_codes\": \"ISO3166-1-Alpha-3\"\n }\n\n # Get pygal country code map\n pygal_countries = pygal.maps.world.COUNTRIES\n\n # 1960\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1960\", \"isp_gdp_world_code_1960.svg\")\n\n # 1980\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1980\", \"isp_gdp_world_code_1980.svg\")\n\n # 2000\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2000\", \"isp_gdp_world_code_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2010\", \"isp_gdp_world_code_2010.svg\")", "def test_render_world_map():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n codeinfo = {\n \"codefile\": \"isp_country_codes.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"plot_codes\": \"ISO3166-1-Alpha-2\",\n 
\"data_codes\": \"ISO3166-1-Alpha-3\"\n }\n\n # Get pygal country code map\n pygal_countries = pygal.maps.world.COUNTRIES\n\n # 1960\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1960\", \"isp_gdp_world_code_1960.svg\")\n\n # 1980\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1980\", \"isp_gdp_world_code_1980.svg\")\n\n # 2000\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2000\", \"isp_gdp_world_code_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2010\", \"isp_gdp_world_code_2010.svg\")", "def create_maps(self,data,tod,mjd,coords):\n features = np.log10(self.getFeatures(data))/np.log10(2)\n special_idx = np.where((features==16))[0]\n # This is for getting the stare data on more recent\n # calibration observations.\n point_data = self.get_point_data(data,special_idx)\n \n cel_maps = self.create_single_map(tod,\n coords['ra'],\n coords['dec'],\n self.source_positions['ra'][coords['sky_data_flag']],\n self.source_positions['dec'][coords['sky_data_flag']])\n az_maps = self.create_single_map(tod,\n coords['az'],\n coords['el'],\n self.source_positions['az'][coords['sky_data_flag']],\n self.source_positions['el'][coords['sky_data_flag']])\n cel_maps= self.average_maps(cel_maps)\n az_maps = self.average_maps(az_maps)\n xygrid = np.meshgrid((np.arange(self.Nx)+0.5)*self.dx - self.Nx*self.dx/2.,\n (np.arange(self.Ny)+0.5)*self.dy - self.Ny*self.dy/2.)\n \n \n cel_maps['xygrid']=xygrid\n cel_maps['StareCoords']= {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n az_maps['xygrid']=xygrid\n az_maps['StareCoords'] = {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n return cel_maps,az_maps", "def test_render_world_map():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n # Get pygal country code map\n pygal_countries = pygal.maps.world.COUNTRIES\n #pygal_countries = {'KEN':'Kenya', 'IDN':'Indonesia', 'IND':'India', \\\n #'USA':'United States of America'}\n\n # 1960\n #render_world_map(gdpinfo, pygal_countries, \"1960\", \"isp_gdp_world_name_1960.svg\")\n\n # 1980\n #render_world_map(gdpinfo, pygal_countries, \"1980\", \"isp_gdp_world_name_1980.svg\")\n\n # 2000\n #render_world_map(gdpinfo, pygal_countries, \"2000\", \"isp_gdp_world_name_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, pygal_countries, \"2010\", \"isp_gdp_world_name_2010.svg\")", "def get_valid_locations(location_list, grid, shape):", "def draw_map():\n \n m1 = Chem.MolFromSmiles('c1ccccc1O')\n m2 = Chem.MolFromSmiles('c1ccccc1N')\n \n # Morgan Fingerprint (with normalization)\n # Can also be used with APFingerprint or TTFingerprint\n fig1, maxweight = SimilarityMaps.GetSimilarityMapForFingerprint(m1, m2, SimilarityMaps.GetMorganFingerprint)\n fig1.savefig('/path/to/similaritymap.png',bbox_inches='tight')\n \n # TT Fingerprint (with normalization)\n fig2, maxweight = SimilarityMaps.GetSimilarityMapForFingerprint(m1, m2, SimilarityMaps.GetTTFingerprint)\n fig2.savefig('/path/to/similaritymap.png',bbox_inches='tight')\n\n # Morgan Fingerprint (without normalization)\n weights = SimilarityMaps.GetAtomicWeightsForFingerprint(m1, m2, SimilarityMaps.GetMorganFingerprint)\n fig3 = SimilarityMaps.GetSimilarityMapFromWeights(m2, weights, size=(150, 150))\n fig3.savefig('/path/to/similaritymap.png',bbox_inches='tight') \n \n # the degree of partial charge by using atomic charge\n 
AllChem.ComputeGasteigerCharges(m1)\n charges = [float(atom.GetProp('_GasteigerCharge')) for atom in m1.GetAtoms()]\n fig4 = SimilarityMaps.GetSimilarityMapFromWeights(m2,charges, size=(150, 150),scale=10)\n fig4.savefig('/path/to/molcharge_similaritymap.png',bbox_inches='tight')", "def m1_make_lower_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n lower_shape_upper_left_row list\n lower_shape_upper_right_row list\n\n lower_shape_lower_left_row list\n lower_shape_lower_right_row list\n \"\"\"\n # upper side\n lower_shape_upper_left_row = []\n lower_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n lower_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n lower_shape_upper_right_row.extend(right_points)\n\n # lower side\n lower_shape_lower_left_row = []\n lower_shape_lower_right_row = []\n\n for i in range(l_n - 1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n lower_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n lower_shape_lower_right_row.extend(right_points)\n\n lower_shape_upper = [lower_shape_upper_left_row, lower_shape_upper_right_row]\n lower_shape_lower = [lower_shape_lower_left_row, lower_shape_lower_right_row]\n\n return lower_shape_upper, lower_shape_lower", "def make_mage_ellipsoids(ids, coord_dict, coord_low_dict,\n coord_high_dict, color, ellipsoid_prefs=\\\n {\"smoothness\":2,\"alpha\":.25}):\n alpha = ellipsoid_prefs['alpha']\n nsubdivs = ellipsoid_prefs['smoothness']\n result = []\n coord_lines = []\n for id_ in sorted(ids):\n if id_ in coord_dict:\n center = coord_dict[id_][:3]\n dims = coord_high_dict[id_][:3] - coord_low_dict[id_][:3]\n\n faces = make_ellipsoid_faces(center, dims, nsubdivs=nsubdivs)\n for face in faces:\n result.append(\"@trianglelist color=%s alpha=%f master={points} nobutton\" %(color, alpha))\n for point in face:\n result.append(' '.join(map(str,point)))\n return result", "def m1_make_upper_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = 
SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_upper_left_row list\n upper_shape_upper_right_row list\n\n upper_shape_lower_left_row list\n upper_shape_lower_right_row list\n \"\"\"\n # upper side\n upper_shape_upper_left_row = []\n upper_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10 # have to \"+\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_upper_right_row.extend(right_points)\n\n # lower side\n upper_shape_lower_left_row = []\n upper_shape_lower_right_row = []\n\n for i in range(l_n -1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10 # have to \"-\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_lower_right_row.extend(right_points)\n\n upper_shape_upper = [upper_shape_upper_left_row, upper_shape_upper_right_row]\n upper_shape_lower = [upper_shape_lower_left_row, upper_shape_lower_right_row]\n\n return upper_shape_upper, upper_shape_lower", "def unitmap(tiles_list, army_id):\n # array of strings\n len_x = max([tile['x'] for tile in tiles_list])\n len_y = max([tile['y'] for tile in tiles_list])\n text_map = [ [\"\"] * (len_x+1) for _ in range(len_y+1)]\n for tile in tiles_list:\n xpos, ypos = tile['x'], tile['y']\n if tile.get('unit_name') is not None:\n mapchar = UNIT_SHORTCODES[tile['unit_name']]\n text_map[ypos][xpos] = (mapchar.upper() if tile['unit_army_id'] == army_id\n else mapchar.lower())\n else:\n text_map[ypos][xpos] = \" \"\n return text_map", "def generate_map():\n o = []\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n pos = [np.random.randint(100, 412), np.random.randint(80, 304)]\n models = make_models()\n\n print(\"# of groups: {}\".format(timestamps.shape[0] // note_group_size))\n for i in range(timestamps.shape[0] // note_group_size):\n z = generate_set(models, begin=i * note_group_size, start_pos=pos, length_multiplier=dist_multiplier,\n group_id=i, plot_map=False)[:, :6] * np.array([512, 384, 1, 1, 512, 384])\n pos = z[-1, 0:2]\n o.append(z)\n a = np.concatenate(o, axis=0)\n return a", "def __load_topography__(filepath):\n\tfrom clawpack.geoclaw import topotools\n\ttopo = topotools.Topography(filepath)\n\t\n\tif TESTING:\n\t\timport matplotlib.pyplot as plt\n\t\ttopo.plot()\n\t\tplt.show()\n\ttopo.topo_type = 3\n\txgrid = topo.X\n\tygrid = topo.Y\n\tzgrid = topo.Z\n\t\n\t#temp; find a better solution (e.g. 
convert from lat/lon to actual space)\n\t#xgrid = 1.e4 * xgrid\n\t#ygrid = 1.e4 * ygrid\n\t\n\t#test only\n\tshape = zgrid.shape\n\tny, nx = shape[0], shape[1]\n\t#for iy in range(0,ny):\n\t\t#zgrid[iy, 0] = zgrid[iy,0]+1e4\n\t#for ix in range(0,nx):\n\t\t#zgrid[1, ix] = zgrid[1,ix]-1e4\n\t\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\twavyz = wavy(xgrid, ygrid)\n\t\n\t\n\tfor ix in range(0,0):\n\t\tfor iy in range(0,0):\n\t\t\tzgrid[iy, ix] = 1e4*wavyz[iy, ix]\n\t\n\tzgrid = 1e-4 * zgrid\n\t\n\treturn (xgrid, ygrid, zgrid)", "def create_jointsmap(uv_coord, size):\r\n\r\n\t# define connections and colors of the bones\r\n\t# print(coords_hw[-1]) # this is center ( the 22nd point)\r\n\tcanvas = np.zeros((size, size, 3))\r\n\tbones = [\r\n\t\t((1, 2), THUMB_COLOR1),\r\n\t\t((2, 3), THUMB_COLOR2),\r\n\t\t((3, 4), THUMB_COLOR3),\r\n\r\n\t\t((5, 6), INDEX_COLOR1),\r\n\t\t((6, 7), INDEX_COLOR2),\r\n\t\t((7, 8), INDEX_COLOR3),\r\n\r\n\t\t((9, 10), MIDDLE_COLOR1),\r\n\t\t((10, 11), MIDDLE_COLOR2),\r\n\t\t((11, 12), MIDDLE_COLOR3),\r\n\r\n\t\t((13, 14), RING_COLOR1),\r\n\t\t((14, 15), RING_COLOR2),\r\n\t\t((15, 16), RING_COLOR3),\r\n\r\n\t\t((17, 18), PINKY_COLOR1),\r\n\t\t((18, 19), PINKY_COLOR2),\r\n\t\t((19, 20), PINKY_COLOR3)]\r\n\tpalm = []\r\n\tfor connection, _ in [((0, 1), []),\r\n\t\t\t\t\t\t ((1, 5), []),\r\n\t\t\t\t\t\t ((5, 9), []),\r\n\t\t\t\t\t\t ((9, 13), []),\r\n\t\t\t\t\t\t ((13, 17), []),\r\n\t\t\t\t\t\t ((17, 0), []), ]:\r\n\t\tcoord1 = uv_coord[connection[0]]\r\n\t\tpalm.append([int(coord1[0]), int(coord1[1])])\r\n\t# palm.append([int((coord1[0]-.5)* W_scale+ W_offset ), int(-(coord1[1]- .5)* H_scale+ H_offset)])\r\n\t# print(palm)\r\n\tcv2.fillConvexPoly(canvas, np.array([palm], dtype=np.int32), PALM_COLOR)\r\n\tfor connection, color in bones:\r\n\t\tcoord1 = uv_coord[connection[0]]\r\n\t\tcoord2 = uv_coord[connection[1]]\r\n\t\tcoords = np.stack([coord1, coord2])\r\n\t\t# 0.5, 0.5 is the center\r\n\t\tx = coords[:, 0]\r\n\t\ty = coords[:, 1]\r\n\t\tmX = x.mean()\r\n\t\tmY = y.mean()\r\n\t\tlength = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5\r\n\t\tangle = np.math.degrees(np.math.atan2(y[0] - y[1], x[0] - x[1]))\r\n\t\tpolygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 2), 16), int(angle), 0, 360, 1)\r\n\t\tcv2.fillConvexPoly(canvas, polygon, color)\r\n\treturn canvas", "def m1_make_middle_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = 
Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n - 1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower", "def nominal_map(options):\n pass", "def _get_feature_map_shape(self, features):\n feature_map_shapes = [shape_utils.combined_static_and_dynamic_shape(feature) for feature in features]\n return [(shape[1], shape[2]) for shape in feature_map_shapes]", "def get_shapes4oceans(featurecla='ocean', rtn_group=False):\n # location of data\n URL = \"http://www.naturalearthdata.com/downloads/10m-physical-labels/\"\n URL += \"/10m-ocean/\"\n # Shapefiles locally?\n # TODO - update to download automatically and store in AC_tools' data directory\n# shapefiles = 'ne_10m_ocean'\n shapefiles = 'ne_10m_geography_marine_polys'\n folder = '/mnt/lustre/users/ts551/labbook/Python_progs/'\n folder += '/AC_tools/data/shapefiles/{}'.format(shapefiles, shapefiles)\n group = geopandas.read_file(folder)\n # Just select state of interest\n choosen_group = group.query(\"featurecla == '{}'\".format(featurecla))\n choosen_group = choosen_group.reset_index(drop=True)\n # Get the shapes\n shapes = zip(choosen_group.geometry, range(len(choosen_group)))\n if rtn_group:\n return choosen_group\n else:\n return shapes", "def view_map():\n print(\"\"\"\n ____________________________________Client Rooms______________________\n| |1 Locker Rooms 2| 1 | 2 | |\n| |_________ ________| | | Dance |\n| | | |__| |__| Floor |\n| | | Hall |\n| Garage | Front | _______|_______ |\n| | Lobby | | |_ ____________|\n| | | | Storage |\n| | | Lounge |______________|\n| ______________ Car\n|___________________Front Entrance______________________| Allyway\n\"\"\")", "def get_sub_map(args: utils.Args, x, y, city_name, vectors=[], polyline_spans=[], mapping=None):\n\n if args.not_use_api:\n pass\n else:\n assert isinstance(am, ArgoverseMap)\n # Add more lane attributes, such as 'has_traffic_control', 'is_intersection' etc.\n if 'semantic_lane' in args.other_params:\n lane_ids = am.get_lane_ids_in_xy_bbox(x, y, city_name, query_search_range_manhattan=args.max_distance)\n local_lane_centerlines = [am.get_lane_segment_centerline(lane_id, city_name) for lane_id in lane_ids]\n polygons = local_lane_centerlines\n\n if args.visualize:\n angle = mapping['angle']\n vis_lanes = [am.get_lane_segment_polygon(lane_id, city_name)[:, :2] for lane_id in lane_ids]\n t = []\n for each in vis_lanes:\n for point in each:\n point[0], point[1] = rotate(point[0] - x, point[1] - y, angle)\n num = len(each) // 2\n t.append(each[:num].copy())\n t.append(each[num:num * 2].copy())\n vis_lanes = t\n mapping['vis_lanes'] = vis_lanes\n else:\n polygons = am.find_local_lane_centerlines(x, y, city_name,\n 
query_search_range_manhattan=args.max_distance)\n polygons = [polygon[:, :2].copy() for polygon in polygons]\n angle = mapping['angle']\n for index_polygon, polygon in enumerate(polygons):\n for i, point in enumerate(polygon):\n point[0], point[1] = rotate(point[0] - x, point[1] - y, angle)\n if 'scale' in mapping:\n assert 'enhance_rep_4' in args.other_params\n scale = mapping['scale']\n point[0] *= scale\n point[1] *= scale\n\n def dis_2(point):\n return point[0] * point[0] + point[1] * point[1]\n\n def get_dis(point_a, point_b):\n return np.sqrt((point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2)\n\n def get_dis_for_points(point, polygon):\n dis = np.min(np.square(polygon[:, 0] - point[0]) + np.square(polygon[:, 1] - point[1]))\n return np.sqrt(dis)\n\n def ok_dis_between_points(points, points_, limit):\n dis = np.inf\n for point in points:\n dis = np.fmin(dis, get_dis_for_points(point, points_))\n if dis < limit:\n return True\n return False\n\n def get_hash(point):\n return round((point[0] + 500) * 100) * 1000000 + round((point[1] + 500) * 100)\n\n lane_idx_2_polygon_idx = {}\n for polygon_idx, lane_idx in enumerate(lane_ids):\n lane_idx_2_polygon_idx[lane_idx] = polygon_idx\n\n # There is a lane scoring module (see Section 3.2) in the paper in order to reduce the number of goal candidates.\n # In this implementation, we use goal scoring instead of lane scoring, because we observed that it performs slightly better than lane scoring.\n # Here we only sample sparse goals, and dense goal sampling is performed after goal scoring (see decoder).\n if 'goals_2D' in args.other_params:\n points = []\n visit = {}\n point_idx_2_unit_vector = []\n\n mapping['polygons'] = polygons\n\n for index_polygon, polygon in enumerate(polygons):\n for i, point in enumerate(polygon):\n hash = get_hash(point)\n if hash not in visit:\n visit[hash] = True\n points.append(point)\n\n # Subdivide lanes to get more fine-grained 2D goals.\n if 'subdivide' in args.other_params:\n subdivide_points = get_subdivide_points(polygon)\n points.extend(subdivide_points)\n subdivide_points = get_subdivide_points(polygon, include_self=True)\n\n mapping['goals_2D'] = np.array(points)\n\n for index_polygon, polygon in enumerate(polygons):\n assert_(2 <= len(polygon) <= 10, info=len(polygon))\n # assert len(polygon) % 2 == 1\n\n # if args.visualize:\n # traj = np.zeros((len(polygon), 2))\n # for i, point in enumerate(polygon):\n # traj[i, 0], traj[i, 1] = point[0], point[1]\n # mapping['trajs'].append(traj)\n\n start = len(vectors)\n if 'semantic_lane' in args.other_params:\n assert len(lane_ids) == len(polygons)\n lane_id = lane_ids[index_polygon]\n lane_segment = am.city_lane_centerlines_dict[city_name][lane_id]\n assert_(len(polygon) >= 2)\n for i, point in enumerate(polygon):\n if i > 0:\n vector = [0] * args.hidden_size\n vector[-1 - VECTOR_PRE_X], vector[-1 - VECTOR_PRE_Y] = point_pre[0], point_pre[1]\n vector[-1 - VECTOR_X], vector[-1 - VECTOR_Y] = point[0], point[1]\n vector[-5] = 1\n vector[-6] = i\n\n vector[-7] = len(polyline_spans)\n\n if 'semantic_lane' in args.other_params:\n vector[-8] = 1 if lane_segment.has_traffic_control else -1\n vector[-9] = 1 if lane_segment.turn_direction == 'RIGHT' else \\\n -1 if lane_segment.turn_direction == 'LEFT' else 0\n vector[-10] = 1 if lane_segment.is_intersection else -1\n point_pre_pre = (2 * point_pre[0] - point[0], 2 * point_pre[1] - point[1])\n if i >= 2:\n point_pre_pre = polygon[i - 2]\n vector[-17] = point_pre_pre[0]\n vector[-18] = point_pre_pre[1]\n\n 
vectors.append(vector)\n point_pre = point\n\n end = len(vectors)\n if start < end:\n polyline_spans.append([start, end])\n\n return (vectors, polyline_spans)", "def test_map_settings_custom():\n m = view(nybb, zoom_control=False, width=200, height=200, tiles=\"CartoDB positron\")\n assert m.location == [\n pytest.approx(40.70582377450201, rel=1e-6),\n pytest.approx(-73.9778006856748, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 10\n assert m.options[\"zoomControl\"] == False\n assert m.height == (200.0, \"px\")\n assert m.width == (200.0, \"px\")\n assert \"cartodbpositron\" in m.to_dict()[\"children\"].keys()\n\n # custom XYZ tiles\n m = view(\n nybb,\n zoom_control=False,\n width=200,\n height=200,\n tiles=\"https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}\",\n attr=\"Google\",\n )\n\n out_str = _fetch_map_string(m)\n assert (\n 'tileLayer(\"https://mt1.google.com/vt/lyrs=m\\\\u0026x={x}\\\\u0026y={y}\\\\u0026z={z}\",{\"attribution\":\"Google\"'\n in out_str\n )\n\n m = view(nybb, location=(40, 5))\n assert m.location == [40, 5]\n assert m.options[\"zoom\"] == 10\n\n m = view(nybb, zoom_start=8)\n assert m.location == [\n pytest.approx(40.70582377450201, rel=1e-6),\n pytest.approx(-73.9778006856748, rel=1e-6),\n ]\n assert m.options[\"zoom\"] == 8\n\n m = view(nybb, location=(40, 5), zoom_start=8)\n assert m.location == [40, 5]\n assert m.options[\"zoom\"] == 8", "def test_20_supergeom_simple(self):\n for proj in ['TAN', 'CEA']:\n ra0, dec0 = CRVAL\n res = 0.01 * DEG\n wcs = coords.get_wcs_kernel(proj, ra0, dec0, res)\n\n wcs.wcs.crpix = (60, 70)\n map0 = enmap.zeros((100,200), wcs=wcs)\n map0[2, 3] = 10.\n map0[90, 192] = 11.\n\n # Extracts.\n m1 = map0[:10,:10]\n m2 = map0[-10:,-10:]\n \n # Reconstruct.\n sg = coords.get_supergeom((m1.shape, m1.wcs), (m2.shape, m2.wcs))\n mapx = enmap.zeros(*sg)\n mapx.insert(m1)\n mapx.insert(m2)\n self.assertTupleEqual(map0.shape, mapx.shape)\n self.assertTrue(np.all(mapx==map0))", "def m4_make_lower_shape_points_list(tx, ty, m4_info, SEN_info):\n \"\"\"\n 1 Get information from m4_info & SEN_info\n \"\"\"\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n lower_shape_upper_left_row list\n lower_shape_upper_right_row list\n\n lower_shape_lower_left_row list\n lower_shape_lower_right_row list\n \"\"\"\n # upper side\n lower_shape_upper_left_row = []\n lower_shape_upper_right_row = []\n\n for i in range(u_n - 1, -1, -1):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n lower_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n):\n # right row\n ix = tx - t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n lower_shape_upper_right_row.extend(right_points)\n\n # lower side\n lower_shape_lower_left_row = []\n lower_shape_lower_right_row = []\n\n for i in range(l_n):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 
= Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n lower_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n - 1, -1, -1):\n # right row\n ix = tx - t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n lower_shape_lower_right_row.extend(right_points)\n\n lower_shape_upper = [lower_shape_upper_left_row, lower_shape_upper_right_row]\n lower_shape_lower = [lower_shape_lower_left_row, lower_shape_lower_right_row]\n\n return lower_shape_upper, lower_shape_lower", "def m4_make_middle_shape_points_list(tx, ty, m4_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n - 1, -1, -1):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p3, p4, p1, p2]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n):\n # right row\n ix = tx - t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p3, p4, p1, p2]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n - 1, -1, -1):\n # right row\n ix = tx - t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower", "def spatial(self):", "def PMT_to_flat_cylinder_mapping( tubes, tube_xyz ):\n mapping = {}\n for idx, tube in enumerate(tubes):\n x = tube_xyz[idx,0]\n y = tube_xyz[idx,1]\n z = tube_xyz[idx,2]\n if ( y > endcap_limit ):\n # in top circle of cylinder\n xflat = x\n yflat = y_offset + z\n mapping[ int( tube-1 ) ] = [ xflat, yflat ]\n \n elif ( y < -endcap_limit):\n # in bottom circle of cylinder+\n xflat = x\n yflat = -y_offset + z\n mapping[ int( tube-1 ) ] = [ xflat, yflat ]\n \n else:\n # in barrel part of cylinder\n theta = math.atan2( z, x )\n xflat = R * theta\n yflat = y\n mapping[ int( tube-1 ) ] = [ xflat, yflat ]\n return mapping", "def __basemap_ancillary(m, latvalues=None, lonvalues=None, drawparallels=True, 
drawcountries=True, land_color=0.8):\n\n if latvalues is None:\n latvalues = np.arange(-90., 120., 30.)\n if lonvalues is None:\n lonvalues = np.arange(-180., 180., 90.)\n if drawcountries:\n m.drawcountries()\n m.drawcoastlines()\n m.drawlsmask(lakes=True, land_color=land_color)\n m.drawmapboundary() # draw a line around the map region\n if drawparallels:\n m.drawparallels(latvalues, labels=[1, 0, 0, 0])\n m.drawmeridians(lonvalues, labels=[0, 0, 0, 1]) # draw meridians", "def SimpleLatLongGrid(min_x,min_y,max_x,max_y,hdeg,hmin,hsec,vdeg,vmin,vsec,\n color=(0.5,1.0,0.5,1.0),xoff=-0.18,yoff=1.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n #font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n x_spacing=float(hdeg)+(float(hmin)+(float(hsec)/60.0))/60.0\n y_spacing=float(vdeg)+(float(vmin)+(float(vsec)/60.0))/60.0\n\n\n # Round to nearest integer space\n max_x=min_x+numpy.floor((max_x-min_x)/x_spacing)*x_spacing\n max_y=min_y+numpy.floor((max_y-min_y)/y_spacing)*y_spacing\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n for hval in numpy.arange(min_x,\n max_x+x_spacing/100.0,\n x_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n hstr=GetLatLongString(hval,'longitude')\n pshp.set_property('position',hstr)\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,\n max_y+y_spacing/100.0,\n y_spacing):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n vstr=GetLatLongString(vval,'latitude')\n pshp.set_property('position',vstr)\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True)\n\n return layer", "def generate_map(nrows, ncols, nrooms, max_col_size, max_row_size):\n arr = np.zeros((nrows, ncols), dtype=np.int8)\n\n for i in range(nrooms):\n rand_row_start = np.random.randint(nrows)\n rand_col_start = np.random.randint(ncols)\n\n rand_row_size = np.random.randint(max_row_size / 2, max_row_size)\n rand_col_size = np.random.randint(max_col_size / 2, max_col_size)\n\n arr[rand_row_start:rand_row_start + rand_row_size, rand_col_start:rand_col_start + rand_col_size] = 1\n\n labels = measure.label(arr)\n regions = measure.regionprops(labels)\n\n centroids = list()\n for region in regions:\n centroids.append(region.centroid)\n\n num_centroids = len(centroids)\n\n # get distances between every pair of centroids\n dists = 
scipy.spatial.distance.cdist(centroids, centroids)\n\n # get a distance that is greater than all current distances\n max_dist = np.max(dists) + 1\n\n # make sure upper triangle is at least max_dist so that when picking closest\n # pairs, we won't choose a diagonal element or a duplicate connection\n dists = dists + np.triu(np.ones((num_centroids, num_centroids))) * max_dist\n\n for i in range(num_centroids - 1):\n min_dist_idx = np.argmin(dists)\n min_dist_idx = np.unravel_index(min_dist_idx, dists.shape)\n\n # create a hallway between regionprops\n centroid1 = np.array(centroids[min_dist_idx[0]], dtype=np.int)\n centroid2 = np.array(centroids[min_dist_idx[1]], dtype=np.int)\n\n [row_centroid_1, row_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[0])\n [col_centroid_1, col_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[1])\n\n arr[row_centroid_1[0]:row_centroid_2[0] + 1, row_centroid_1[1]] = 1\n arr[row_centroid_2[0], col_centroid_1[1]:col_centroid_2[1] + 1] = 1\n\n dists[:, min_dist_idx[1]] += max_dist\n\n return arr", "def __init__(self,\n access_token=None,\n center=(0, 0),\n opacity=1,\n div_id='map',\n height='500px',\n style='mapbox://styles/mapbox/light-v9?optimize=true',\n width='100%',\n zoom=0,\n min_zoom=0,\n max_zoom=24,\n pitch=0,\n bearing=0,\n box_zoom_on=True,\n double_click_zoom_on=True,\n scroll_zoom_on=True,\n touch_zoom_on=True,\n legend_fill='white',\n legend_header_fill='white',\n legend_text_color='#6e6e6e',\n legend_title_halo_color='white',\n legend_key_borders_on=True\n ):\n if access_token is None:\n access_token = os.environ.get('MAPBOX_ACCESS_TOKEN', '')\n if access_token.startswith('sk'):\n raise TokenError('Mapbox access token must be public (pk), not secret (sk). ' \\\n 'Please sign up at https://www.mapbox.com/signup/ to get a public token. 
' \\\n 'If you already have an account, you can retreive your token at https://www.mapbox.com/account/.')\n self.access_token = access_token\n self.template = 'map'\n self.div_id = div_id\n self.width = width\n self.height = height\n self.style = style\n self.center = center\n self.zoom = zoom\n self.opacity = opacity\n self.label_property = None\n self.min_zoom = min_zoom\n self.max_zoom = max_zoom\n self.pitch = pitch\n self.bearing = bearing\n self.box_zoom_on = box_zoom_on\n self.double_click_zoom_on = double_click_zoom_on\n self.scroll_zoom_on = scroll_zoom_on\n self.touch_zoom_on = touch_zoom_on\n self.legend_fill = legend_fill\n self.legend_header_fill = legend_header_fill\n self.legend_text_color = legend_text_color,\n self.legend_title_halo_color = legend_title_halo_color\n self.legend_key_borders_on = legend_key_borders_on\n self.layer_id_counter = 0\n self.layers = []", "def m4_make_upper_shape_points_list(tx, ty, m4_info, SEN_info):\n \"\"\"\n 1 Get information from m4_info & SEN_info\n \"\"\"\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_upper_left_row list\n upper_shape_upper_right_row list\n\n upper_shape_lower_left_row list\n upper_shape_lower_right_row list\n \"\"\"\n # upper side\n upper_shape_upper_left_row = []\n upper_shape_upper_right_row = []\n\n for i in range(u_n - 1, -1, -1):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n upper_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n):\n # right row\n ix = tx - t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n upper_shape_upper_right_row.extend(right_points)\n\n # lower side\n upper_shape_lower_left_row = []\n upper_shape_lower_right_row = []\n\n for i in range(l_n):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n upper_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n - 1, -1, -1):\n # right row\n ix = tx - t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n upper_shape_lower_right_row.extend(right_points)\n\n upper_shape_upper = [upper_shape_upper_left_row, upper_shape_upper_right_row]\n upper_shape_lower = [upper_shape_lower_left_row, upper_shape_lower_right_row]\n\n return upper_shape_upper, upper_shape_lower", "def plot_map(data, pixlist, nside, runconf, title, unit):\n hpmap = np.repeat(np.nan, hp.pixelfunc.nside2npix(nside))\n hpmap[pixlist] = data\n plot_lon_cen, plot_lat_cen = runconf['ra'], runconf['dec']\n if plot_lon_cen > 180:\n plot_lon_cen -= 360\n plot_bounds = (1.0 + 1.1 * runconf['radius']) * np.array([-1.0, 1.0])\n \n hp.cartview(hpmap, rot=(plot_lon_cen, plot_lat_cen, 0), \n 
lonra=plot_bounds, latra=plot_bounds,\n notext=True, unit=unit, title=title, \n min=min(data), max=max(data), coord='C', flip='astro')\n\n dec0 = np.round(runconf['dec'])\n dec_spacing = runconf['radius']/2.0\n decs = dec0 + np.arange(-2*runconf['radius'], 2.1*runconf['radius'], dec_spacing)\n decs_min, decs_max = min(decs), max(decs)\n\n ra0 = np.round(runconf['ra'])\n cosdelt =np.cos(np.deg2rad(dec0)) \n ra_spacing = runconf['radius']/cosdelt / 2.0\n ras = ra0 + np.arange(-2.0*runconf['radius']/cosdelt, 2.1*runconf['radius']/cosdelt, ra_spacing)\n ras_min, ras_max = min(ras), max(ras)\n #num_ras = np.ceil(1.0 * runconf['radius'] / grid_spacing / np.cos(np.deg2rad(min(decs))) )\n\n line_dec = np.linspace(decs_min, decs_max, 100)\n line_ra = np.linspace(ras_min, ras_max, 100)\n for ra in ras:\n hp.projplot(np.repeat(ra, 100), line_dec, lonlat=True, ls='dashed', color='black')\n hp.projtext(ra, dec0, r\"{ra:0.1f}$^\\circ$\".format(ra=ra), lonlat=True, clip_on=True)\n for dec in decs:\n hp.projplot(line_ra, np.repeat(dec, 100), lonlat=True, ls='dashed', color='black')\n hp.projtext(ra0, dec, r\"{dec:0.1f}$^\\circ$\".format(dec=dec), lonlat=True, clip_on=True, rotation=90)\n return", "def setup_maps(self):\n super().setup_maps()\n sprite_classes = {\n \"walls\": Wall,\n \"play\": Background,\n \"exit\": Background,\n }\n island_map = TiledMap((\"images/qwerty_game_1.tmx\"), sprite_classes)\n self.add_map(island_map)", "def drawShapes(self):\n self.draw_polygon(self.poly3.get_points() , color = \"#000\")\n self.draw_polygon(self.poly2.get_points() , color = \"#000\")\n self.draw_polygon(self.poly1.get_points() , color = \"#000\")\n self.draw_rect(0, 0, self.width, self.height, color= \"#000\")\n \"\"\"These statements are used to determine if a point is inside any of the\n 3 polygons and if so changes the point's color\"\"\"\n if (self.poly2.point_inside_polygon(self.p1) or self.poly1.point_inside_polygon(self.p1)\n or self.poly3.point_inside_polygon(self.p1)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p1.x, self.p1.y, 7, 7, color)\n\n if (self.poly2.point_inside_polygon(self.p2) or self.poly1.point_inside_polygon(self.p2)\n or self.poly3.point_inside_polygon(self.p2)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p2.x, self.p2.y, 7, 7, color)\n if (self.poly2.point_inside_polygon(self.p3) or self.poly1.point_inside_polygon(self.p3)\n or self.poly3.point_inside_polygon(self.p3)):\n color = \"#0F0\"\n else:\n color = \"#F00\"\n self.fill_oval(self.p3.x, self.p3.y, 7, 7, color)", "def is_map(self, alias):\n maps = {\"Ensembl2Reactome_All_Levels\": False,\n \"ReactomePathways\": True,\n \"reactome.homo_sapiens.interactions.tab-delimited\": False,\n \"ReactomePathwaysRelation\": True}\n return maps[alias]", "def __init__(self, pl, matrix, materialnodebysymbol):\n super(BoundPolygons, self).__init__(pl, matrix, materialnodebysymbol)", "def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) 
(ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map", "def add_shapes(df):\n gdf = gpd.read_file(Essex_Shape_Config.LAU_SHP_FILE)\n gdf.replace({'LAD19NM': {\"Epping Forest\": \"Epping\"}}, inplace = True) #epping is shortened in scraping\n merge_df = pd.merge(df,gdf, left_on='search_term', right_on='LAD19NM')\n merge_gdf = gpd.GeoDataFrame(merge_df)\n return merge_gdf", "def plot_map(ax=None, alpha=0.3, zorder=0):\n if ax is None:\n ax = plt.axes(projection=ccrs.PlateCarree())\n # national boundaries\n boundaries_50m = cartopy.feature.NaturalEarthFeature(category='cultural',\n name='admin_0_boundary_lines_land',\n scale='50m',\n edgecolor='k',\n facecolor='none')\n ax.add_feature(boundaries_50m,\n alpha=alpha,\n zorder=zorder)\n # states\n states_50m = cartopy.feature.NaturalEarthFeature(category='cultural',\n name='admin_1_states_provinces_lines',\n scale='50m',\n edgecolor='k',\n facecolor='none')\n ax.add_feature(states_50m,\n alpha=alpha,\n zorder=zorder)\n # coastlines\n coastline_50m = cartopy.feature.NaturalEarthFeature('physical',\n 'coastline',\n '50m',\n edgecolor='k',\n facecolor='none')\n ax.add_feature(coastline_50m,\n alpha=alpha,\n zorder=zorder)\n # lakes\n lakes_110m = cartopy.feature.NaturalEarthFeature('physical',\n 'lakes',\n '110m',\n edgecolor='k',\n facecolor='none')\n # add all shape objects\n ax.add_feature(lakes_110m,\n alpha=alpha,\n zorder=zorder)\n\n return ax", "def get_shapes4country(country='South Africa'):\n # location of data\n URL = \"http://www.naturalearthdata.com/downloads/10m-cultural-vectors\"\n URL += \"/10m-admin-1-states-provinces/\"\n # Shapefiles locally?\n # TODO - update to download automatically and store in AC_tools' data directory\n shapefiles = 'ne_10m_admin_1_states_provinces_lakes'\n# shapefiles = 'ne_10m_admin_1_states_provinces'\n folder = '/mnt/lustre/users/ts551/labbook/Python_progs/'\n folder += '/AC_tools/data/shapefiles/{}'.format(shapefiles, shapefiles)\n states = geopandas.read_file(folder)\n # Just select state of interest\n choosen_states = states.query(\"admin == '{}'\".format(country))\n choosen_states = choosen_states.reset_index(drop=True)\n # Get the shapes\n shapes = zip(choosen_states.geometry, range(len(choosen_states)))\n return shapes", "def generate_xmap(x_len, y_len, all_cids, all_xcoords, all_ycoords):\r\n # Determine figure height and width\"\"\"\r\n img_height = x_len * 80\r\n img_width = y_len * 80\r\n\r\n # Write html script which allows for mouseover of labels\r\n xmap = []\r\n for cid, x, y in zip(all_cids, all_xcoords, all_ycoords):\r\n xmap.append(AREA_SRC % (x, img_height - y, cid, cid))\r\n\r\n return xmap, img_height, img_width", "def m2_m3_make_lower_shape_points_list(dx, dy, m_info, SEN_info):\n \"\"\"\n 1 Get information from m_info & SEN_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = 
m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n l_n = SEN_info[4]\n r_n = SEN_info[5]\n set = SEN_info[6]\n l_offset = SEN_info[7]\n r_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n lower_shape_left_upper_row list\n lower_shape_left_lower_row list\n\n lower_shape_right_upper_row list\n lower_shape_right_lower_row list\n \"\"\"\n # Leftside\n lower_shape_left_upper_row = []\n lower_shape_left_lower_row = []\n\n for i in range(l_n):\n # upper row\n ix = i * l_offset + set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n lower_shape_left_upper_row.extend((upper_points))\n\n for i in range(l_n - 1, -1, -1):\n # lower row\n ix = i * l_offset + set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n lower_shape_left_lower_row.extend(lower_points)\n\n # Rightside\n lower_shape_right_upper_row = []\n lower_shape_right_lower_row = []\n\n for i in range(r_n):\n # upper row\n ix = x_m - i * r_offset - set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n lower_shape_right_upper_row.extend((upper_points))\n\n for i in range(r_n - 1, -1, -1):\n # lower row\n ix = x_m - i * r_offset - set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n lower_shape_right_lower_row.extend(lower_points)\n\n lower_shape_left = [lower_shape_left_upper_row, lower_shape_left_lower_row]\n lower_shape_right = [lower_shape_right_upper_row, lower_shape_right_lower_row]\n\n return lower_shape_left, lower_shape_right", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y 
in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def drawMap(self):\n for position, contain in self.map.items():\n if contain is \"block\":\n self.blocks.add(Block(position[1]*50,position[0]*50))\n elif contain is \"Coins\":\n self.Coins.add(Coins(position[1]*50+10,position[0]*50+10))", "def generate_map(self):\n map = Map.Map(50, 80, 1000, 10, 6)\n\n #here we can map out our larger map structure\n if self.level < 2:\n map.make_greathall()\n elif self.level >= 2 and self.level < 20:\n map.make_map()\n elif self.level >= 20:\n map.make_cave()\n else:\n map.make_map()\n return map", "def three_moment0_maps(gal_indices,lines,**kwargs):\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig = plt.figure(figsize=(17,14),constrained_layout=False)\n gs1 = fig.add_gridspec(nrows=3, ncols=3, wspace=0.05, hspace=0.02)\n\n if not p.R_max: p.R_max = [60.]*3\n\n rotate = False\n for row_i,gal_index in enumerate(gal_indices):\n line_i = 0\n for quant in lines:\n legend = True\n label = False\n if quant == lines[-1]: label = True\n #if line_i == len(lines)-1: legend = True\n ax1 = fig.add_subplot(gs1[row_i,line_i])\n moment0_map(gal_index=gal_index,cmap=p.cmap,quant=quant,add=True,ax=ax1,R_max=p.R_max[row_i],legend=legend,label=label)\n # Make a size indicator\n ax1.set_xlim([-p.R_max[row_i],p.R_max[row_i]]); ax1.set_ylim([-p.R_max[row_i],p.R_max[row_i]])\n ax1.plot([p.R_max[row_i]*(1-0.35),p.R_max[row_i]*(1-0.35)+10],[p.R_max[row_i]*(-1+0.15),p.R_max[row_i]*(-1+0.15)],lw=4,color='white')\n ax1.text(p.R_max[row_i]*(1-0.45),p.R_max[row_i]*(-1+0.25),'10 kpc',color='white',fontsize=14)\n # Remove axes ticks\n 
ax1.tick_params(axis='x',which='both',labelbottom=False,bottom=False,top=False)\n ax1.tick_params(axis='y',which='both',labelleft=False,bottom=False,top=False) \n line_i += 1\n ax1.text(p.R_max[row_i]*(-1+0.15),p.R_max[row_i]*(1-0.2),quant.replace('L_',''),color='white',fontsize=18)\n # s = segs\n\n gs1.update(top=0.98,bottom=0.02,left=0.02,right=0.93)\n #fig.text(0.97,0.5, 'log surface brightness density (Jy${\\cdot}$km/s / kpc$^2$)', va='center', ha='center', fontsize=22, rotation='vertical')\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'pretty/'): os.mkdir(p.d_plot + 'pretty/')\n plt.savefig('plots/pretty/moment0_maps.png',format='png',dpi=200)", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def showPolygons(list_of_polygons, list_of_colors = False, alpha=0.85, hatch='', fill = True):\n if list_of_colors:\n colors = list_of_colors\n else: \n colormap = plt.cm.summer\n colors = [colormap(i) for i in np.linspace(0.35, 1.0, len(list_of_polygons))]\n\n fig = gcf()\n ax = gca()\n \n \n \n # changed 141019\n if not isinstance(list_of_polygons, list):\n list_of_polygons = [list_of_polygons]\n\n for i,s in enumerate(list_of_polygons):\n ax.add_patch(PolygonPatch(s,fc=colors[i], ec = 'grey', color=colors[i], fill=fill, alpha=alpha, hatch=hatch))\n \n axis('equal')", "def map_from_conf(map_conf, nframes):\n rmlist = []\n for rmconf in map_conf:\n if 'nsamples' in rmconf and 'shape' in rmconf:\n ldmks = landmarks_from_rectangle(rmconf['nsamples'], rmconf['shape'])\n elif 'ldmks' in rmconf:\n ldmks = rmconf['ldmks']\n else:\n raise RuntimeException('No way to compute ldmks')\n\n\n traj = dyn_trajectory(T_from_angle_pos(rmconf['inittheta'],\n rmconf['initpos']),\n T_from_angle_pos(rmconf['deltheta'],\n rmconf['delpos']),\n nframes)\n rm = RigidMotions(RigidBody2D(ldmks), traj)\n rmlist.append(rm)\n\n return LandmarkMap(rmlist)", "def test_multi_area(self):\n pass", "def __init__(self, island_map):\n self.island_map = island_map\n self.landscape_dict = {'M': Mountain,\n 'O': Ocean,\n 'J': Jungle,\n 'S': Savannah,\n 'D': Desert}", "def m2_m3_make_upper_shape_points_list(dx, dy, m_info, SEN_info):\n \"\"\"\n 1 Get information from m_info & SEN_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n l_n = SEN_info[4]\n r_n = SEN_info[5]\n set = SEN_info[6]\n l_offset = SEN_info[7]\n r_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_left_upper_row list\n upper_shape_left_lower_row list\n\n upper_shape_right_upper_row list\n upper_shape_right_lower_row list\n \"\"\"\n # Leftside\n upper_shape_left_upper_row = []\n upper_shape_left_lower_row = []\n\n for i in range(l_n):\n # upper row\n ix = i * l_offset + set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n upper_shape_left_upper_row.extend((upper_points))\n\n for i in range(l_n - 1, -1, -1):\n # lower row\n ix = i * l_offset + set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n upper_shape_left_lower_row.extend(lower_points)\n\n # Rightside\n 
upper_shape_right_upper_row = []\n upper_shape_right_lower_row = []\n\n for i in range(r_n):\n # upper row\n ix = x_m - i * r_offset - set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_right_upper_row.extend((upper_points))\n\n for i in range(r_n - 1, -1, -1):\n # lower row\n ix = x_m - i * r_offset - set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_right_lower_row.extend(lower_points)\n\n upper_shape_left = [upper_shape_left_upper_row, upper_shape_left_lower_row]\n upper_shape_right = [upper_shape_right_upper_row, upper_shape_right_lower_row]\n\n return upper_shape_left, upper_shape_right", "def requires_mapping(self):", "def get_map(img, vertices, labels, annotations, embeddings, scale, length, embedding_size):\n\n score_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n geo_map = np.zeros((int(img.height * scale), int(img.width * scale), 5), np.float32)\n ignored_map = np.zeros((int(img.height * scale), int(img.width * scale), 1), np.float32)\n embedding_map = np.zeros((int(img.height * scale), int(img.width * scale), embedding_size), np.float32)\n\n index = np.arange(0, length, int(1 / scale))\n index_x, index_y = np.meshgrid(index, index)\n ignored_polys = []\n polys = []\n\n for i, vertice in enumerate(vertices):\n if labels[i] == 0:\n ignored_polys.append(np.around(scale * vertice.reshape((4, 2))).astype(np.int32))\n continue\n if np.any(np.around(scale * vertice.reshape((4, 2))).astype(np.int32) <= 0):\n continue\n if np.any(np.around(scale * vertice.reshape((4, 2))).astype(np.int32) >= int(scale * img.height)):\n continue\n\n poly = np.around(scale * shrink_poly(vertice, coef=0.2).reshape((4, 2))).astype(np.int32) # scaled & shrink\n polys.append(poly)\n temp_mask = np.zeros(score_map.shape[:-1], np.float32)\n cv2.fillPoly(temp_mask, [poly], 1)\n\n theta = find_min_rect_angle(vertice)\n rotate_mat = get_rotate_mat(theta)\n\n rotated_vertices = rotate_vertices(vertice, theta)\n x_min, x_max, y_min, y_max = get_boundary(rotated_vertices)\n rotated_x, rotated_y = rotate_all_pixels(rotate_mat, vertice[0], vertice[1], length)\n\n d1 = rotated_y - y_min\n d1[d1 < 0] = 0\n d2 = y_max - rotated_y\n d2[d2 < 0] = 0\n d3 = rotated_x - x_min\n d3[d3 < 0] = 0\n d4 = x_max - rotated_x\n d4[d4 < 0] = 0\n geo_map[:, :, 0] += d1[index_y, index_x] * temp_mask\n geo_map[:, :, 1] += d2[index_y, index_x] * temp_mask\n geo_map[:, :, 2] += d3[index_y, index_x] * temp_mask\n geo_map[:, :, 3] += d4[index_y, index_x] * temp_mask\n geo_map[:, :, 4] += theta * temp_mask\n\n min_x = int(min(poly[0][0], poly[1][0], poly[2][0], poly[3][0]))\n max_x = int(max(poly[0][0], poly[1][0], poly[2][0], poly[3][0]))\n min_y = int(min(poly[0][1], poly[1][1], poly[2][1], poly[3][1]))\n max_y = int(max(poly[0][1], poly[1][1], poly[2][1], poly[3][1]))\n embedding_map[min_y:max_y, min_x:max_x] = embeddings[annotations[i]]\n\n cv2.fillPoly(ignored_map, ignored_polys, 1)\n cv2.fillPoly(score_map, polys, 1)\n\n return torch.Tensor(score_map).permute(2, 0, 1), torch.Tensor(geo_map).permute(2, 0, 1), \\\n torch.Tensor(ignored_map).permute(2, 0, 1), torch.Tensor(embedding_map).permute(2, 0, 1)", "def railway_areas(osm_path): \n return retrieve(osm_path,'multipolygons',['railway','landuse'],**{'railway':[\"='platform' or \",\"='station' or 
\",\"='tram_stop'\"],'landuse':[\"='railway'\"]})", "def create_map_fig(choropleth_state_data, choropleth_county_data, boundary_box, choropleth_layer='state', zoom=3.5, center=dict(lat= 37.0902, lon=-95.7129)):\r\n # mapbox stuff\r\n mapbox_access_token = \"pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNqdnBvNDMyaTAxYzkzeW5ubWdpZ2VjbmMifQ.TXcBE-xg9BFdV2ocecc_7g\"\r\n base_map = \"carto-positron\" \r\n\r\n # 3. default figure object: load state and county choropleth map\r\n # when zoom level < 8, only load state and county choropleth map, else only load point data\r\n if zoom < 8:\r\n if choropleth_layer == 'state':\r\n choropleth_state_data['visible']=True\r\n map_fig = dict(\r\n data=[choropleth_state_data],\r\n layout=dict(\r\n mapbox=dict(\r\n layers=[],\r\n accesstoken=mapbox_access_token,\r\n style=base_map,\r\n center=center,\r\n zoom=zoom,\r\n ),\r\n autosize=True,\r\n margin=map_margin\r\n )\r\n )\r\n elif choropleth_layer == 'county':\r\n choropleth_state_data['visible']=True\r\n map_fig = dict(\r\n data=[choropleth_county_data],\r\n layout=dict(\r\n mapbox=dict(\r\n layers=[],\r\n accesstoken=mapbox_access_token,\r\n style=base_map,\r\n center=center,\r\n zoom=zoom,\r\n ),\r\n autosize=True,\r\n margin=map_margin\r\n )\r\n )\r\n else:\r\n choropleth_state_data['visible']=False\r\n map_fig = dict(\r\n data=[choropleth_state_data],\r\n layout=dict(\r\n mapbox=dict(\r\n layers=[],\r\n accesstoken=mapbox_access_token,\r\n style=base_map,\r\n center=center,\r\n zoom=zoom,\r\n ),\r\n autosize=True,\r\n margin=map_margin\r\n )\r\n )\r\n \r\n else:\r\n # -- Load scattered points -- \r\n point_df = load_point_data(boundary_box)\r\n point_data = create_point_data(point_df,zoom)\r\n map_fig = dict(\r\n data=[point_data],\r\n layout=dict(\r\n mapbox=dict(\r\n layers=[],\r\n accesstoken=mapbox_access_token,\r\n style=base_map,\r\n center=center,\r\n zoom=zoom,\r\n ),\r\n autosize=True,\r\n margin=map_margin\r\n )\r\n ) \r\n\r\n return map_fig", "def render_map(self):\n # first we create a blank image, on which we will draw the base map\n width = self.image_size[0]\n height = self.image_size[1]\n # ex: size of the image 1080 height, 1920 width, 3 channels of colour\n base_map = np.zeros((height, width, 3), np.uint8)\n base_map[:, :] = self.background_color\n\n # we draw each shape of the dictionary on the blank image\n for shape_id in self.shape_dict_filt:\n shape = self.shape_dict_filt[shape_id]\n points = shape.points\n pts = np.array(points, np.int32)\n cv2.polylines(base_map, [pts], True, shape.color_line,\n shape.line_thick, cv2.LINE_AA)\n\n self.map_file = base_map", "def create_platforms(plat_map):\n platform_group = set()\n for plat in plat_map:\n platform_group.add(Platform([(plat[1] + 0.5) * TILE_DIM, (plat[0] + 0.5)\n * TILE_DIM, plat[2], plat[3]]))\n return platform_group", "def test_absolute_areas(self):\n\n assert len(self.test_shape.areas) == 4\n assert len(set([round(i) for i in self.test_shape.areas])) == 3\n assert self.test_shape.areas.count(pytest.approx(60 * math.pi * 2 * 1000)) == 2\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 970)) == 1\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 1030)) == 1", "def basic_map(proj):\n fig = plt.figure(figsize=(15, 10))\n add_metpy_logo(fig, 0, 80, size='large')\n view = fig.add_axes([0, 0, 1, 1], projection=proj)\n view.set_extent([-120, -70, 20, 50])\n view.add_feature(cfeature.STATES.with_scale('50m'))\n view.add_feature(cfeature.OCEAN)\n view.add_feature(cfeature.COASTLINE)\n 
view.add_feature(cfeature.BORDERS, linestyle=':')\n return fig, view", "def geoshapes(c = False, r = False, t = False):\n if c == True:\n circle = plt.Circle((0,0), radius = 3, fill = None, edgecolor = 'y')\n plt.gca().add_patch(circle)\n elif r == True:\n rectangle = plt.Rectangle((0, 0), 5, 5, fill = None, edgecolor='r')\n plt.gca().add_patch(rectangle)\n elif t == True:\n points = [[0,0], [0,3], [3,3]]\n triangle = plt.Polygon(points, fill = None, edgecolor = 'r')\n plt.gca().add_patch(triangle)\n else:\n print('Error: please input a True value for either c, r , or t')", "def Set(*args):\n return _XCAFDoc.XCAFDoc_ShapeMapTool_Set(*args)", "def ground_contact_geoms(self):\n raise NotImplementedError", "def read_postcode_sectors(path):\n with fiona.open(path, 'r') as pcd_sector_shapes:\n return [pcd for pcd in pcd_sector_shapes]", "def read_postcode_sectors(path):\n with fiona.open(path, 'r') as pcd_sector_shapes:\n return [pcd for pcd in pcd_sector_shapes]", "def get_shape_dicts(route_short_name, septa_fn):\n \n #modify this path to a sqlite file with\n #the gtfs data in it. \n #to create this file, i used\n #https://github.com/jarondl/pygtfs.git\n e = create_engine(septa_fn)\n Session = sessionmaker(bind = e)\n s = Session()\n\n route_block_to_shape = {}\n q = \"SELECT routes.route_short_name, trips.block_id, trips.shape_id \\\n FROM routes INNER JOIN trips \\\n ON routes.route_id == trips.route_id \\\n WHERE routes.route_short_name == :rsn \\\n GROUP BY trips.block_id\"\n results = s.execute(q, {\"rsn\":route_short_name})\n \n for r in results:\n route_block_to_shape[(r.route_short_name, r.block_id)] = r.shape_id\n\n s_ids = set(route_block_to_shape.values())\n shape_to_path = {}\n for s_id in s_ids:\n q = \"SELECT shapes.shape_pt_lat, shapes.shape_pt_lon \\\n FROM shapes \\\n WHERE shapes.shape_id == :s_id\"\n\n results = s.execute(q, {'s_id':s_id})\n path = [tuple(r) for r in results]\n shape_to_path[s_id] = path\n \n s.close()\n\n return route_block_to_shape, shape_to_path", "def print_local_map(self):\n size = 15\n size_half = int(size/2)\n temp_map = []\n for i in range(size):\n map_row = []\n for j in range(size):\n coords = (self.rob_pos[0] + i-size_half,\n self.rob_pos[1] + j-size_half) \n\n if(self.check_limits(coords)):\n if self.rob_pos[0]==coords[0] and self.rob_pos[1]==coords[1]:\n map_row.append(\"R\")\n else:\n map_row.append(self.map[coords[0]][coords[1]])\n temp_map.append(map_row)\n \n #print map upside down cause thats how its saved....\n for i in range(14,-1,-1):\n rospy.logdebug(temp_map[i])", "def setup_maps(self):\n super().setup_maps()\n sprite_classes = {\n \"Obstacles\": Wall,\n \"Background\": QuestSprite,\n }\n self.add_map(TiledMap(resolve_resource_path(\"images/island/island.tmx\"), sprite_classes))", "def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return", "def onRectangle(self, rectangle_geometry):\n\n array = arcpy.Array()\n array.add(rectangle_geometry.upperLeft)\n array.add(rectangle_geometry.upperRight)\n array.add(rectangle_geometry.lowerRight)\n array.add(rectangle_geometry.lowerLeft)\n array.add(rectangle_geometry.upperLeft)\n\n polygon = arcpy.Polygon(array)\n arcpy.env.overwriteOutput = True # temporarily allow overwriting of existing datasets, then disallow.\n arcpy.FeatureToPolygon_management(polygon, r\"C:\\Users\\s\\Documents\\Masters of 
Geospatial\\GISP\\Assignment2\\GISdata\\survey_area.shp\")\n arcpy.env.overwriteOutput = False\n\n WORKSPACE = r\"C:\\Users\\s\\Documents\\Masters of Geospatial\\GISP\\Assignment2\\GISdata\"\n arcpy.env.workspace = WORKSPACE\n\n # arcpy.env.overwriteOutput = True # temporarily allow overwriting of existing datasets, then disallow.\n # survey_area = arcpy.management.CreateFeatureclass(WORKSPACE, \"survey_area.shp\", \"POLYGON\", spatial_reference=28351)\n # with arcpy.da.InsertCursor(survey_area, ['SHAPE@']) as cursor:\n # cursor.insertRow(rectangle_geometry.Polygon)\n # arcpy.env.overwriteOutput = False\n\n\n # this next code block may be uneccessary\n # mxd = arcpy.mapping.MapDocument(\"CURRENT\")\n # df = arcpy.mapping.ListDataFrames(mxd, \"\")[0]\n # addLayer = arcpy.mapping.Layer(\"survey_area.shp\")\n # arcpy.mapping.AddLayer(df, addLayer, \"BOTTOM\")\n # del mxd, addLayer\n #\n # arcpy.RefreshActiveView()", "def build_maps():\n return render_template(\"maps.html\")" ]
[ "0.63717324", "0.613321", "0.5944496", "0.5944336", "0.5906684", "0.57789654", "0.5769901", "0.57611984", "0.5754275", "0.5728243", "0.57055295", "0.5698698", "0.56905955", "0.56884706", "0.5656957", "0.56549674", "0.5640292", "0.56383663", "0.5637242", "0.5591361", "0.55793846", "0.5560288", "0.5557171", "0.5531239", "0.5492334", "0.5490898", "0.5490517", "0.5460754", "0.54587406", "0.5446237", "0.54343784", "0.54333615", "0.54333615", "0.5430714", "0.5426718", "0.54246163", "0.5416116", "0.5395945", "0.5387738", "0.53647643", "0.5349316", "0.5344955", "0.53380305", "0.5329709", "0.5306703", "0.5299486", "0.5295384", "0.52944535", "0.5285941", "0.52829117", "0.5274413", "0.52731323", "0.5265794", "0.52450806", "0.5241993", "0.5239331", "0.52278566", "0.5226125", "0.5225403", "0.52250636", "0.5210288", "0.52098984", "0.5202775", "0.5199969", "0.51990706", "0.5183972", "0.5176754", "0.51767343", "0.5169048", "0.51682544", "0.516253", "0.5158041", "0.51511157", "0.51478976", "0.51471627", "0.51451814", "0.5144741", "0.51445675", "0.51405054", "0.51350576", "0.513348", "0.5133437", "0.51300025", "0.511877", "0.5113896", "0.51111126", "0.51002777", "0.5096943", "0.50939256", "0.50857997", "0.5084971", "0.50843614", "0.508238", "0.50779605", "0.50779605", "0.5076648", "0.5073642", "0.506514", "0.50577813", "0.50532615", "0.50497794" ]
0.0
-1
initmethod = ['random', 'pca']
algos = ['seq','batch']
all_neigh = ['gaussian','manhatan','bubble','cut_gaussian','epanechicov' ]
alfa_types = ['linear','inv','power']
def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005):
    self.initmethod = initmethod
    self.algtype = algtype
    self.alfaini = alfaini
    self.alfafinal = alfafinal
    self.neigh = neighborhoodmethod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, options, is_training=False):\n self.options = options\n self.is_training = is_training\n self.add_bi_directional_edges = None\n self.add_self_loop_edges = None\n self.use_reverse_edges = None", "def __init__(self, algorithm, iters, **params):\n self.algorithm=algorithm\n self.iters=iters\n if self.iters <= 0:\n raise ValueError(\"the number of iterations must be greater than zero\")\n if self.algorithm=='dtree':\n self.depth = params.pop('depth')\n if self.algorithm=='ann':\n self.gamma = params.pop('gamma')\n\n self.estimators_ = []\n self.estimator_weights_ = np.zeros(self.iters, dtype=np.float) \n self.clf=object", "def main():\n parser = argparse.ArgumentParser(description='Implementation of the Naive Bayes and Perceptron classifiers')\n parser.add_argument('--statsmode', help='whether to gather stats or not', choices=['y','Y','N','n'], default='n')\n parser.add_argument('--classifier', help='classifier to use', choices=['BAYES', 'PERCEPTRON'], required=True)\n parser.add_argument('--mode', help='image class to test', choices=['VALIDATION', 'TEST'], default='TEST')\n parser.add_argument('--type', help='image type to train', choices=['DIGIT', 'FACE', 'MNIST'], required=True)\n parser.add_argument('--range', metavar=('START', 'END_EXCLUSIVE'), nargs=2, type=int, help='Range of data to test', default=[0, 100])\n parser.add_argument('--trainpercent', metavar='PERCENT', type=int, help='the percent of training data to use (int out of 100)', default=100, dest='percentage')\n parser.add_argument('--smoothing', type=int, help='Laplace smoothing constant (Naive Bayes)', default=2)\n parser.add_argument('--iterations', type=int, help='Number of times to iterate over training data (Perceptron)', default=5)\n parser.add_argument('--debug', help='Outputs more detailed information to stdout', action='store_true')\n parser.add_argument('--statloops', type=int, help='Number of times the classifier iterates over test data (Statistics only)', default=5)\n args = parser.parse_args()\n # image_type = ImageType.DIGIT if args.type == 'DIGIT' else ImageType.FACE\n image_type = None\n if args.type == 'DIGIT':\n image_type = ImageType.DIGIT\n elif args.type == 'FACE':\n image_type = ImageType.FACE\n else:\n image_type = ImageType.MNIST\n mode = Mode.TEST if args.mode == 'TEST' else Mode.VALIDATION\n if args.statsmode == 'y' or args.statsmode == 'Y':\n run_percentages_classifier(args.classifier, image_type, args)\n else:\n run = run_classifier_bayes if args.classifier == 'BAYES' else run_classifier_perceptron\n run(mode, image_type, args)", "def __init__(self, total_args):\n\t\tself.alpha = 0.0\n\t\tself.salida = 0.0\n\t\tself.bias = pseudoaleatorio(-1.0, 1.0)\n\t\tself.pesos = []\n\t\tfor i in range(total_args):\n\t\t\tself.pesos.append(pseudoaleatorio(-1.0, 1.0))", "def __init__(self, type, slen=4, alen=1, lexsize=256):\n # vowels {i,u,e,o} in articulatory features (hi, bk, rd) \\in {-1,0,1}\n self.vowels = N.array(((1.0, 0.0, 0.0),\n (1.0, 1.0, 0.0),\n (0.0, 0.0, 0.0),\n (0.0, 1.0, 0.0)))\n self.vf = {(1.0, 0.0, 0.0): \"i\",\n (1.0, 1.0, 0.0): \"u\",\n (0.0, 0.0, 0.0): \"e\",\n (0.0, 1.0, 0.0): \"o\"}\n self.consonants = list(\"bcdfghjklmnpqrstvwxyz\")\n # acoustic:articulatory mapping fxn for vowel prototypes\n # acoustic reps are F1,F2' pairs, articulatory reps are feature-based\n self.vowel_map = {}\n self.vowel_spread = 0\n self.memory = N.empty((lexsize, slen, 2))\n # each agent has its own articulatory variability\n #TODO: maybe this should be inferred by the learners\n # on the 
basis of their data?\n self.alpha = N.random.normal(15, 2)\n self.beta = N.random.normal(2, 0.25)\n if self.beta < 1.0:\n self.beta = 1.1\n\n if type == \"learner\":\n self.stems = N.empty((lexsize, 4, 3), dtype=float)\n #self.affixes = N.empty((1,4))\n elif type == \"speaker\":\n tmp = [[x, y, 0.0] for x in [0.0, 1.0] for y in [0.0, 1.0]]\n self.stems = N.array([[a, b, c, d] for a in tmp for b in tmp\n for c in tmp for d in tmp])\n else:\n sys.exit(\"Undefined agent type. Aborting.\")\n # vectorized versions of some fxns\n self.vec_perceive = vectorize(self.perceive)\n self.vec_articulate = vectorize(self.articulate)\n self.vec_acoustify = vectorize(self.acoustify)", "def caixa_preta(\n algorithm=1,\n generations=50,\n population=[],\n is_single=False,\n is_uniform=True,\n is_elitist=True,\n is_ultra_elitist=False,\n elitism=0.85,\n xfactor=0.3,\n win_factor=1\n ):\n\n data_fit = []\n\n if algorithm == 1:\n for gen in range(generations):\n # calcula o fitness da população\n fit = fitness(population)\n # faz a seleção por torneio\n f_selec_torn, f_perd_torn = torneio(population, fit, win_factor)\n # faz o cruzamento\n child_sliced = cruzamento(f_selec_torn, is_single=is_single, is_uniform=is_uniform)\n # faz a mutação do filho\n child_mutaded = mutacao(child_sliced, xfactor=xfactor)\n # seleciona a nova população\n population = substituicao(population, father=f_selec_torn, children=child_mutaded, elitism=elitism, is_ultra_elitist=is_ultra_elitist, is_elitist=is_elitist)\n\n if type(population[0][0]) != int:\n import pdb;pdb.set_trace()\n\n fit = fitness(population)\n\n data_fit.append(best_fitness(fit))\n\n else:\n for gen in range(generations):\n # calcula o fitness da população\n fit = fitness(population)\n # calcula a média do fitness do conjunto\n fit_avg = fitness_medio(fit)\n # calcula a probabilidade de seleção\n prob_selec = selecao_prob(population, fit, fit_avg)\n # faz a seleção por rodeio\n f_selec_rol = roleta(population, prob_selec)\n # faz o cruzamento\n child_sliced = cruzamento(f_selec_rol, is_single=is_single, is_uniform=is_uniform)\n # faz a mutação do filho\n child_mutaded = mutacao(child_sliced, xfactor)\n # seleciona a nova população\n population = substituicao(population, f_selec_rol, child_mutaded, elitism, is_elitist)\n fit = fitness(population)\n\n data_fit.append(best_fitness(fit))\n\n return data_fit", "def __init__(self, neighbourhood, algorithm, iterations, set_up):\n self.input = neighbourhood\n self.algorithm = algorithm\n self.set_up = set_up\n self.iterations = int(iterations)\n self.configs = self.get_configs()\n self.houses = self.load_houses()\n self.big_iterations = -1\n self.small_iterations = 0\n self.caps = []\n self.batteries = {}\n self.lowest = 99999\n self.index = 0\n self.run_algorithm()", "def probinit(self, aaa, n_obj):\n # Set algorithm...\n if aaa == 'nsga':\n algo = nsga_II(m=0.05)\n else:\n algo = jde(memory=True)\n #algo = mde_pbx()\n #algo = de_1220()\n\n # ...and initialize problem with instance atributes\n prob = mga_1dsm(seq = self.FBseq,\n multi_objective = n_obj,\n dsm_dv_barrier = self.MAX_DV)\n\n prob.set_vinf((self.C3)**0.5)\n prob.set_tof(self.TOF[0], self.TOF[1])\n prob.set_entry_barrier(self.entry_barrier)\n prob.set_launch_window(self.EPOCHSTART, self.EPOCHEND)\n return prob, algo", "def default_pars(type='simple_plus'):\n norients = 16\n orients = [o * np.pi / norients for o in range(norients)]\n divfreqs = [2, 3, 4, 6, 11, 18]\n freqs = [1. 
/ n for n in divfreqs]\n phases = [0]\n\n # this is something new.\n # there are 6 steps. and I can turn on/off these steps. potentially.\n steps = {'preproc_resize',\n 'preproc_lowpass',\n 'normin',\n 'filter', 'activ',\n 'normout', 'dimr'}\n\n # dict with all representation parameters\n representation = {\n\n # - preprocessing\n # prepare images before processing\n 'preproc': {\n # resize input images by keeping aspect ratio and fix the biggest edge\n 'max_edge': 150,\n # kernel size of the box low pass filter\n 'lsum_ksize': 3,\n },\n\n # - input local normalization\n # local zero-mean, unit-magnitude\n 'normin': {\n # kernel shape of the local normalization\n 'kshape': (3, 3),\n # magnitude threshold\n # if the vector's length is below, it doesn't get resized\n 'threshold': 1.0,\n },\n\n # - linear filtering\n 'filter': {\n # kernel shape of the gabors\n 'kshape': (43, 43),\n # list of orientations\n 'orients': orients,\n # list of frequencies\n 'freqs': freqs,\n # list of phases\n 'phases': phases,\n # threshold (variance explained) for the separable convolution\n # should be set to 1 or bigger when debugging.\n 'sep_threshold': .9,\n 'max_component': 100000,\n # just big enough (using inf would be more correct technically, though that will be a problem for JSON)\n 'fix_bug': False, # whether fixing separated convolution bug.\n 'mode': 'same', # this is only available for non legacy. can be also ``'valid'``.\n },\n\n # - simple non-linear activation\n 'activ': {\n # minimum output (clamp)\n 'minout': 0,\n # maximum output (clamp)\n 'maxout': 1,\n 'type': 'clamp', # can also be `square`, `exp`, `recsquare`, `rec`\n },\n\n # - output local normalization\n 'normout': {\n # kernel shape of the local normalization\n 'kshape': (3, 3),\n # magnitude threshold\n # if the vector's length is below, it doesn't get resized\n 'threshold': 1.0,\n },\n\n # - dimension reduction\n 'dimr': {\n # kernel size of the local sum (2d slice)\n 'lsum_ksize': 17,\n # fixed output shape (only the first 2 dimensions, y and x)\n 'outshape': (30, 30),\n },\n }\n\n if type == 'simple_plusplus_2nd_scale':\n representation['preproc']['max_edge'] = 75\n\n if type == 'simple_plus':\n featsel = {\n # Include representation output ? True or False\n 'output': True,\n\n # Include grayscale values ? None or (height, width)\n 'input_gray': (100, 100),\n # Include color histograms ? None or nbins per color\n 'input_colorhists': None,\n # Include input norm histograms ? None or (division, nfeatures)\n 'normin_hists': None,\n # Include filter output histograms ? None or (division, nfeatures)\n 'filter_hists': None,\n # Include activation output histograms ? None or (division, nfeatures)\n 'activ_hists': (2, 10000),\n # Include output norm histograms ? None or (division, nfeatures)\n 'normout_hists': (1, 10000),\n # Include representation output histograms ? None or (division, nfeatures)\n 'dimr_hists': (1, 10000),\n }\n elif type == 'simple':\n featsel = {\n # Include representation output ? True or False\n 'output': True,\n\n # Include grayscale values ? None or (height, width)\n 'input_gray': None,\n # Include color histograms ? None or nbins per color\n 'input_colorhists': None,\n # Include input norm histograms ? None or (division, nfeatures)\n 'normin_hists': None,\n # Include filter output histograms ? None or (division, nfeatures)\n 'filter_hists': None,\n # Include activation output histograms ? None or (division, nfeatures)\n 'activ_hists': None,\n # Include output norm histograms ? 
None or (division, nfeatures)\n 'normout_hists': None,\n # Include representation output histograms ? None or (division, nfeatures)\n 'dimr_hists': None,\n }\n elif type == 'simple_plusplus_2nd_scale':\n featsel = {\n # Include representation output ? True or False\n 'output': True,\n\n # Include grayscale values ? None or (height, width)\n 'input_gray': (37, 37),\n # Include color histograms ? None or nbins per color\n 'input_colorhists': None,\n # Include input norm histograms ? None or (division, nfeatures)\n 'normin_hists': None,\n # Include filter output histograms ? None or (division, nfeatures)\n 'filter_hists': None,\n # Include activation output histograms ? None or (division, nfeatures)\n 'activ_hists': (2, 10000),\n # Include output norm histograms ? None or (division, nfeatures)\n 'normout_hists': (1, 10000),\n # Include representation output histograms ? None or (division, nfeatures)\n 'dimr_hists': (1, 10000),\n }\n else:\n raise NotImplementedError('not supported pars type!')\n\n return deepcopy({'steps': steps,\n 'representation': representation,\n 'featsel': featsel})", "def __init__(self, features, labels, adj,\n ising=True, symmetrize=True,\n max_neigh_sample=None, positive=True):\n self.features = features\n self.labels = labels\n self.adj = adj\n self.ising = ising\n self.symmetrize = symmetrize\n self.max_neigh_sample = max_neigh_sample\n self.positive = positive", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def init_algorithm(config, id_algo, id_discdds, discdds):\n # instance the algorithm\n set_current_config(config)\n algo = config.algos.instance(id_algo) \n # initialize the algorithm with the dynamics\n # TODO: add computation time\n #t0 = time.clock()\n algo.set_name_for_log(id_algo)\n algo.init(id_discdds, discdds) \n #init_time = time.clock() - t0\n return algo", "def __init__(self):\n self.rho=[]\n self.te=[]\n self.ti=[]\n self.ne=[]\n self.ni=[]\n self.ni1=[]\n self.ni2=[]\n self.ni3=[]\n self.vtor=[]\n self.zeff=[]\n\n self.nion=1\n self.Z=[]\n self.A=[]\n self.coll_mode=[]", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = 
lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def general_gantest(proba, nbr_qubits):\n for m in [4096, 2048]:\n for l in [1, 2, 3]:\n print(\"Easy mode results for m={} and l={}:\".format(m, l))\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=0, easy=True)\n print(\"\\n\")\n print(\"Distribution learning results for m={} and l={}:\".format(m, l))\n for d in [256, 512]:\n print(\"For \", d, \": \")\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=d, easy=False)\n print(\"Singleton learning results for m={} and l={}:\".format(m, l))\n Variationer_learn_gan(1000, l, m, proba=proba, n=nbr_qubits, distri_size=0, easy=False)", "def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = 
np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)", "def __init__(self,numagents,bits,in_nodes,print_flag=False):\n self.numagents = numagents\n self.agent = []\n self.in_bits = in_nodes\n\n \"\"\" initialize empty dict for trajectories\"\"\"\n self.trajectories={}\n self.accepted_changes = 0\n self.learning_rate = numpy.zeros((3,1),dtype=int)\n\n \"\"\" initialize network topology \"\"\"\n self.adjacency = numpy.zeros((numagents,numagents),dtype=int)\n\n\n # Erdös Renyi network; symmetric G(n,p) with p=0.5;\n # if p > ln(n)/n graph is almost surely connected\n \n p0=0.75\n for ia1 in range(self.numagents):\n for ia2 in range(ia1+1,self.numagents):\n if ia1 < 3 and ia2 < 3: self.adjacency[ia1,ia2] = 1 #fully connected in_nodes\n else:\n pER = numpy.random.random()\n if pER <= p0: self.adjacency[ia1,ia2] = 1\n self.adjacency[ia2,ia1] = self.adjacency[ia1,ia2] #symmetrize\n\n\n\n \"\"\"#complete graph (dense)\n for ia1 in range(self.numagents):\n for ia2 in range(ia1+1,self.numagents):\n self.adjacency[ia1,ia2] = 1\n self.adjacency[ia2,ia1] = 1\n \"\"\"\n\n\n \"\"\"\n #4-lattice\n for ia in range(self.numagents):\n if ia > 1 and ia < self.numagents-2:\n ia1 = ia + 1\n ia2 = ia + 2\n ia3 = ia - 1\n ia4 = ia - 2\n\n elif ia == 0:\n ia1 = ia + 1\n ia2 = ia + 2\n ia3 = self.numagents-1\n ia4 = self.numagents-2\n\n elif ia == 1:\n ia1 = ia + 1\n ia2 = ia + 2\n ia3 = ia - 1\n ia4 = self.numagents-1\n\n elif ia == self.numagents-2:\n ia1 = ia + 1\n ia2 = 0\n ia3 = ia - 1\n ia4 = ia - 2\n elif ia == self.numagents-1:\n ia1 = 0\n ia2 = 1\n ia3 = ia- 1\n ia4 = ia -2\n\n self.adjacency[ia,ia1] = 1\n self.adjacency[ia1,ia] = 1\n self.adjacency[ia,ia2] = 1\n self.adjacency[ia2,ia] = 1\n self.adjacency[ia,ia3] = 1\n self.adjacency[ia3,ia] = 1\n self.adjacency[ia, ia4] = 1\n self.adjacency[ia4, ia] = 1\n \"\"\"\n\n \n #SF-network, BA(m=1)\n \"\"\"\n m=1\n main_graph = networkx.barabasi_albert_graph(self.numagents, m)\n self.adjacency = networkx.to_numpy_array(main_graph)\n \"\"\"\n\n # density is number of edges/number of possible edges\n self.graph_density = numpy.sum(self.adjacency)/(self.numagents*(self.numagents-1) )\n\n\n \"\"\" initialize agents: \"\"\"\n iagents = range(self.in_bits)\n majority = True\n for ia in range(self.numagents):\n nbh = 3 #ALT: take mean neighborhood from graph; nbh = global_functions.dict_from_adj(self.adjacency)\n mu1 = numpy.random.randint(1, nbh + 1) # int(thresh)\n mu2 = numpy.random.randint(mu1, nbh + 1) #\n if ia in iagents: i = True\n else: i = False\n self.agent.append(agent(ia,ia,bits,nbh,i,mu1,mu2,majority=majority)) #initialize with majority rule\n #print(ia,self.agent[ia].rule)\n\n if print_flag:\n adj_dict = global_functions.dict_from_adj(self.adjacency)\n numagents = 0\n for ia in range(len(self.adjacency)):\n if numpy.sum(self.adjacency[ia]) != 0 or numpy.sum(self.adjacency[:, ia]) != 0: numagents += 1\n print(\"Initialize network with\", self.numagents, \"agents on a 
graph with\"\n , len(adj_dict), \"edges and density of\", numpy.around(self.graph_density,2))\n print(\"Input agents:\", iagents)\n print(\"Average degree:\", numpy.around(numpy.sum(self.adjacency)/self.numagents,2))\n\n if self.numagents <= 12:\n for l in self.adjacency: print(l)\n else:\n for key in adj_dict: print(key,\"->\",adj_dict[key])\n\n print(\"Perceptual rule of type:\")\n if majority: print(\"majority rule: x -> 1: if sum(input) >= out_degree/2\")\n else:\n print(\"x -> 1: if mu1 <= sum(input) <= mu2\")\n print(\"mu1 =\",[self.agent[ia].mu1 for ia in range(self.numagents)],\n \"mu2 =\",[self.agent[ia].mu2 for ia in range(self.numagents)])\n #for key in itertools.product({0, 1}, repeat = self.out_bits):\n # print(key, self.agent[0].update(key))\n print(\"Network successfully initialized\")", "def __init__(self, epsilon=0.05,gamma=0.6, alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n FQLearningAgent.__init__(self, **args)", "def __init__(\n self,\n net,\n eps,\n sua=False,\n pi=False,\n update_freq=1,\n alpha=1.0,\n constraint_norm=False,\n data_size=50000,\n use_batch_norm=False,\n ):\n self.net = net\n self.state = net.state_dict()\n self.mean_state = copy.deepcopy(self.state)\n self.data_size = data_size\n self.use_batch_norm = use_batch_norm\n\n self.eps = eps\n self.sua = sua\n self.pi = pi\n self.update_freq = update_freq\n self.alpha = alpha\n self.constraint_norm = constraint_norm\n self.params = []\n self._iteration_counter = 0\n for mod in net.modules():\n mod_class = mod.__class__.__name__\n if mod_class in [\"Linear\", \"Conv2d\"]:\n mod.register_forward_pre_hook(self._save_input)\n mod.register_backward_hook(self._save_grad_output)\n params = [mod.weight]\n if mod.bias is not None:\n params.append(mod.bias)\n d = {\"params\": params, \"mod\": mod, \"layer_type\": mod_class}\n self.params.append(d)\n\n elif \"BatchNorm\" in mod_class and use_batch_norm:\n mod.register_forward_pre_hook(self._save_input)\n mod.register_backward_hook(self._save_grad_output)\n\n params = [mod.weight, mod.bias]\n\n d = {\"params\": params, \"mod\": mod, \"layer_type\": mod_class}\n self.params.append(d)\n\n super(KFACLaplace, self).__init__(self.params, {})\n # super(KFACLaplace, self).__init__()", "def extractBestAlgorithms(args = algs2009, f_factor=2,\n target_lb=1e-8, target_ub=1e22):\n\n # TODO: use pproc.TargetValues class as input target values\n # default target values:\n targets = pproc.TargetValues(\n 10**np.arange(np.log10(max((1e-8, target_lb))),\n np.log10(target_ub) + 1e-9, 0.2))\n # there should be a simpler way to express this to become the\n # interface of this function\n\n print 'Loading algorithm data from given algorithm list...\\n' \n\n verbose = True\n dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose)\n\n print 'This may take a while (depending on the number of algorithms)'\n\n selectedAlgsPerProblem = {}\n for f, i in pproc.dictAlgByFun(dictAlg).iteritems():\n for d, j in pproc.dictAlgByDim(i).iteritems():\n selectedAlgsPerProblemDF = []\n best = BestAlgSet(j)\n \n for i in range(0, len(best.target)):\n t = best.target[i]\n # if ((t <= target_ub) and (t >= target_lb)):\n if toolsstats.in_approximately(t,\n targets((f, d), discretize=True)):\n # add best for this target:\n selectedAlgsPerProblemDF.append(best.algs[i])\n \n # add second best or all algorithms that have an ERT\n # within a factor of 
f_factor of the best:\n secondbest_ERT = np.infty\n secondbest_str = ''\n secondbest_included = False \n for astring in j:\n currdictalg = dictAlg[astring].dictByDim()\n if currdictalg.has_key(d):\n curralgdata = currdictalg[d][f-1] \n currERT = curralgdata.detERT([t])[0]\n if (astring != best.algs[i]):\n if (currERT < secondbest_ERT):\n secondbest_ERT = currERT\n secondbest_str = astring\n if (currERT <= best.detERT([t])[0] * f_factor):\n selectedAlgsPerProblemDF.append(astring)\n secondbest_included = True\n if not (secondbest_included) and (secondbest_str != ''):\n selectedAlgsPerProblemDF.append(secondbest_str)\n \n if len(selectedAlgsPerProblemDF) > 0:\n selectedAlgsPerProblem[(d, f)] = selectedAlgsPerProblemDF\n \n print 'pre-processing of function', f, 'done.' \n \n print 'loading of best algorithm(s) data done.'\n \n countsperalgorithm = {}\n for (d, f) in selectedAlgsPerProblem:\n print 'dimension:', d, ', function:', f\n setofalgs = set(selectedAlgsPerProblem[d,f])\n \n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += selectedAlgsPerProblem[d,f].count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"\n \n return selectedalgsperdimension", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n #self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def init_process(mech):\n gases[mech] = ct.Solution(mech)\n gases[mech].transport_model = 'Multi'", "def on_game_start(self, config):\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\n self.structureInPlace = False\n self.destructorsLeft = 0\n self.destructorsMiddle = 0\n self.juicyTargets = 0\n self.juicyCorner = False\n self.floodGatesOpen = True\n self.defenseRating = 0\n self.defenseCost 
= 0\n self.attackedFromLeft = 0\n\n self.mainStructure = [[ 25, 13],[ 24, 12],[ 23, 11],[ 22, 10],[ 21, 9],[ 20, 8],[ 19, 7],[ 18, 6],[ 17, 5],[ 16, 4],[ 15, 3],[ 14, 2],[ 13, 1]]\n\n\n self.filter0 =[[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],\\\n [ 9, 13],[ 10, 13],[ 17, 13],[ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13]] \n self.filter1 = [[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],[ 9, 13],[ 10, 13],[ 17, 13],\\\n [ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13],[ 2, 12],[ 25, 12],[ 3, 11],[ 24, 11],[ 4, 10]]\n self.filter2 = [[ 0, 13],[ 1, 13],[ 2, 13],[ 3, 13],[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13],[ 8, 13],[ 9, 13],[ 10, 13],[ 17, 13],\\\n [ 18, 13],[ 19, 13],[ 20, 13],[ 21, 13],[ 22, 13],[ 23, 13],[ 24, 13],[ 25, 13],[ 26, 13],[ 27, 13],[ 2, 12],[ 25, 12],[ 3, 11],[ 24, 11],[ 4, 10]]\n self.filter3 = [[ 4, 13],[ 5, 13],[ 6, 13],[ 7, 13]]\n\n self.destructor0 = [[ 13, 13]]\n self.destructor1 = [[ 13, 13],[ 14, 13]]\n self.destructor2 = [[ 13, 13],[ 14, 13]]\n self.destructor3 = [[ 13, 13],[ 14, 13]]\n\n self.initExclusionList = [[0,0]]\n self.exclusionList = [[0,0]]", "def __init__(self, G, population, condition_axelrod, condition_centola):\n super(ExpandableAlgorithm, self).__init__(G, population)\n self._overlap_function = overlap_similarity\n self._post_args = None\n self.condition_axelrod = condition_axelrod\n self.condition_centola = condition_centola", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['D_adv','D_cls', 'G_A','G_B', 'cycle_A','G_adv','reg','idt']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n visual_names_A = ['real_A','A','mask_A', 'fake_B','B','mask_B', 'rec_A']\n #visual_names_B = ['real_B', 'fake_A', 'rec_B']\n # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)\n # if self.isTrain and self.opt.lambda_identity > 0.0:\n # visual_names_A.append('idt_B')\n # #visual_names_B.append('idt_A')\n\n # combine visualizations for A and B\n self.visual_names = visual_names_A #+ visual_names_B\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.\n \n\n # define networks (both Generators and discriminators)\n # The naming is different from those used in the paper.\n # Code (vs. 
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n self.netG_A=[]\n self.netG_B=[]\n self.netG_Amask=[]\n self.netG_Bmask=[]\n if self.isTrain:\n self.model_names += ['G_A', 'G_Amask', 'G_B', 'G_Bmask', 'D', 'Dadv']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'G_Amask', 'G_B', 'G_Bmask']\n for i in range(opt.num_class):\n tG_A, tG_Amask = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n \n self.netG_A.append(tG_A)\n self.netG_Amask.append(tG_Amask)\n tG_B, tG_Bmask = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B.append(tG_B)\n self.netG_Bmask.append(tG_Bmask)\n\n self.netD= networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,opt.num_class)\n self.netDadv = networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids, 1)\n \n\n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n assert(opt.input_nc == opt.output_nc)\n # create image buffer to store previously generated images\n # self.fake_A_pool = ImagePool(opt.pool_size)\n # create image buffer to store previously generated images\n # self.fake_B_pool = ImagePool(opt.pool_size)\n # define loss functions\n # define GAN loss.\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionGAN_D = networks.GANLoss('multi-label').to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizers_G=[]\n for i in range(opt.num_class):\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A[i].parameters(\n ), self.netG_B[i].parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) \n self.optimizers_G.append(self.optimizer_G)\n \n self.optimizer_D = torch.optim.Adam(self.netD.parameters(\n ), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers=self.optimizers_G+[self.optimizer_D]", "def construct_parameters(self, method= \"random\", W = np.zeros(1), b = np.zeros(1), initialization=True):\n #W = np.asarray(W, dtype=object)\n #b = np.asarray(b, dtype=object)\n for i in reversed(range(1,len(self.architecture))):\n \n if initialization==True:\n if self.activations[i-1] in {'relu' , 'leakyrelu' , 'ealu'}:\n variance = np.sqrt(2/(self.architecture[i-1])) #He initialization\n elif self.activations[i-1] == 'tanh':\n variance = np.sqrt(6/(self.architecture[i-1] + self.architecture[i])) #Xavier initialization\n elif self.activations[i-1] in ('swish' , 'sigmoid'):\n variance = np.sqrt(1/(self.architecture[i-1]))\n else:\n variance = 1\n \n elif initialization == False:\n variance = 1\n \n if method == 'random':\n self.weights_and_biases[f'W{i}'] = np.random.rand(self.architecture[i-1], self.architecture[i])*variance #randomised initialisation \n self.weights_and_biases[f'b{i}'] = np.zeros(self.architecture[i])*variance\n \n elif method == 'manual': #manual initialisation using given weights and biases\n self.weights_and_biases[f'W{i}'] = W[i-1]\n self.weights_and_biases[f'b{i}'] = b[i-1] \n return self.weights_and_biases", "def build_algorithm(self, algorithm_type):\n distance_matrix = self.matrix_handler.distance_matrix\n algorithm_execution_parameters = 
{}\n if algorithm_type == \"spectral\":\n # We need to set number of clusters for performance and we get sigma if defined\n algorithm_execution_parameters[\"max_clusters\"] = self.evaluation_parameters[\"maximum_clusters\"]\n if \"sigma\" in self.clustering_parameters[\"algorithms\"][\"spectral\"]:\n algorithm_execution_parameters[\"sigma_sq\"] = self.clustering_parameters[\"algorithms\"][\"spectral\"][\"sigma\"]\n # else it calculates its own sigma\n\n if algorithm_type in [\"spectral\",\"dbscan\",\"gromos\",\"kmedoids\",\"random\",\"hierarchical\"] :\n return ClusteringExplorer.get_clustering_algorithm_class()[algorithm_type](distance_matrix, **algorithm_execution_parameters)\n else:\n print \"[ERROR][ClusteringExplorer::build_algorithms] Not known algorithm type ( %s )\"%(algorithm_type)\n self.notify(\"SHUTDOWN\", \"Not known algorithm type ( %s )\"%(algorithm_type))\n exit()", "def gen_ap_def():\n\n while True:\n\n ap_params = [None, None]\n\n ap_params[0] = np.random.choice(OFF_OPTS, p=OFF_PROBS)\n ap_params[1] = np.random.choice(EXP_OPTS, p=EXP_PROBS)\n\n yield ap_params", "def init_algorithm(self):\n pass", "def __init__(self,head_offset=0,aquifer_type='unconfined',domain_center=0+0j,\r\n domain_radius=1,H = None,variables=[],priors=[],observations=[]):\r\n \r\n import numpy as np\r\n \r\n # Set potential scaling variables\r\n self.head_offset = head_offset\r\n self.aquifer_type = aquifer_type\r\n self.H = H\r\n \r\n # Set domain scaling variables\r\n self.domain_center = domain_center\r\n self.domain_radius = domain_radius\r\n \r\n if not np.isscalar(self.domain_center):\r\n self.domain_center = self.domain_center[0] + 1j*self.domain_center[1]\r\n \r\n # Check input for validity\r\n self.check_input()\r\n \r\n # Define a list for Analytic Elements\r\n self.elementlist = []\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n self.observations = observations\r\n \r\n # This function scrapes the model and its elements for unknown variables,\r\n # then gives this instance three new variables:\r\n # self.num_params Number of unknown variables\r\n # self.params List of unknown variables\r\n # self.param_names List of names of unknown variables\r\n # self.priors List of prior dictionaries for unknow variables\r\n self.take_parameter_inventory()\r\n \r\n self.linear_solver = False\r\n \r\n # Pre-allocate the function matrix and parameter vector for the linear solver\r\n self.matrix_solver = []\r\n self.params_vector = []", "def initOpt(self):\n\t\tself.optNodes=[]\n\t\tself.optNode=-1\n\t\tif self.m.headType=='Bracke':\n\t\t\tbracke=True #one head per decice\n\t\telse:\n\t\t\tbracke=False\n\t\tif '2a' in self.m.type:\n\t\t\t#this is strictly for 2000 plants/ha, i.e 10 spots per half circle and [4,9]m crane dimensions\n\t\t\tw1 = 1.3\n\t\t\tw2 = 1.0\n\t\t\tif self.mountPoint is 'left':\n\t\t\t\tfor r in [self.m.craneMaxL-w2, self.m.craneMinL+w2]:\n\t\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tdth=(pi-2*asin(w1/r))/3.\n\t\t\t\tth-=dth\n\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\tdth=(pi-asin(w1/r))/5. 
#outer\t\t\t\n\t\t\t\tth-=3*dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\telse:\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\tdth=(pi-asin(w1/r))/5. #outer\n\t\t\t\tth-=dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMinL+w2\n\t\t\t\tdth=(pi-2*asin(w1/r))/3.\n\t\t\t\tth=pi-asin(w1/r)-2.*dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=asin(w1/r)\n\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\telse:\n\t\t\tassert len(self.m.pDevs)==0 or len(self.m.pDevs)==1 and self.m.pDevs[0]==self\n\t\t\tw1 = self.plantAreaW/2.\n\t\t\tw2 = self.plantAreaL/2.\n\t\t\tif bracke:\n\t\t\t\tspaceMin=self.m.plantMinDist\n\t\t\telse:\n\t\t\t\tspaceMin=self.plantAreaW-self.plantHeads[0].width+self.m.plantMinDist #minimum spacing for angular movements.\n\t\t\tn=ceil(self.m.nSeedlingsPWArea/len(self.plantHeads)) #due to several plantHeads per device\n\t\t\tnLeft=n\n\t\t\tlInner = (self.m.craneMinL+w2)*(pi-2*asin(w1/(self.m.craneMinL+w2)))\n\t\t\tsLength = sqrt(pow(self.m.craneMaxL-w2,2)-pow(w1,2))-sqrt(pow(self.m.craneMinL+w2,2)-pow(w1,2))\n\t\t\tlOuter =(self.m.craneMaxL-w2)*(pi-2*asin(w1/(self.m.craneMaxL-w2)))\n\t\t\tlMiddle=0\n\t\t\trList=[self.m.craneMinL+w2, 'border', self.m.craneMaxL-w2]\n\t\t\tlTot=lInner+sLength+lOuter\n\t\t\trMiddle=-1\n\t\t\tdr=self.m.craneMaxL-w2-(self.m.craneMinL+w2)\n\t\t\tif dr>2*self.m.plantMinDist: #create another sweep\n\t\t\t\trMiddle=(self.m.craneMaxL-w2)-dr/2.\n\t\t\t\tlMiddle=rMiddle*(pi-2*asin(w1/rMiddle))\n\t\t\t\trList.append(rMiddle)\n\t\t\t\tlTot+=lMiddle\n\t\t\tlCurr=0\n\t\t\tfor r in rList:\n\t\t\t\tif r is 'border':\n\t\t\t\t\tr=self.m.craneMinL+w2\n\t\t\t\t\tL=sLength\n\t\t\t\t\tnSection=nLeft*(L/(lTot-lCurr))\n\t\t\t\t\t#dr=(L-2*dr)/nSection =>\n\t\t\t\t\tdr=L/(nSection+2.)\n\t\t\t\t\tif dr<self.m.plantMinDist: dr=self.m.plantMinDist\n\t\t\t\t\ta=0\n\t\t\t\t\twhile r<(self.m.craneMaxL-w2)-2*dr:\n\t\t\t\t\t\tr+=dr\n\t\t\t\t\t\tth=asin(w1/(r))\n\t\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\ta+=1\n\t\t\t\telse:\n\t\t\t\t\tL=r*(pi-2*asin(w1/r))\n\t\t\t\t\tnSection=nLeft*(L/(lTot-lCurr)) #how much to plant on this section\n\t\t\t\t\tdth=(pi-2*asin(w1/r))/nSection\n\t\t\t\t\tif dth*r < spaceMin: dth=spaceMin/r\n\t\t\t\t\tif r == self.m.craneMinL+w2 or r==rMiddle:\n\t\t\t\t\t\tdth=-dth\n\t\t\t\t\t\tth=pi-asin(w1/(r))\n\t\t\t\t\telse:\n\t\t\t\t\t\tth=asin(w1/(r))\n\t\t\t\t\ta=0\n\t\t\t\t\twhile abs(th-pi/2.)-0.00001<=(pi-2*asin(w1/r))/2.:\n\t\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tth+=dth\n\t\t\t\t\t\ta+=1\n\t\t\t\tif a<nSection: #if spaceMin got into it and interfered.\n\t\t\t\t\tnSection=a\n\t\t\t\tnLeft-=nSection\n\t\t\t\tlCurr+=L", "def __init__(self, epsilon=0.05, gamma=0.95, alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n 
PQLearningAgent.__init__(self, **args)", "def __init__(self,\n lower, upper ,\n fun ,\n max_std, min_std ,\n init_numb_trees = 10 ,\n max_numb_trees = 20 ,\n max_seeds = 10 ,\n min_seeds = 1 ,\n epsilon = 0.1 ,\n epsilon_decay = 0.0 ,\n max_iters = 100 ,\n mut_proba = 0.1 ,\n seed = None ,\n ):\n\n # generates a seed for the random number generator\n if (seed == None):\n self.seed = random.randint(0, 1000)\n else:\n self.seed = seed\n random.seed(self.seed)\n\n # assigns properties of FO algorithm\n self.max_number_trees = max_numb_trees\n self.max_seeds = max_seeds\n self.min_seeds = min_seeds\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.max_iters = max_iters\n self.max_std = max_std\n self.min_std = min_std\n self.mut_proba = mut_proba\n\n # assigns fitness function\n self.evaluate = fun\n\n # stores lower and upper bounds\n self.lower = lower\n self.upper = upper\n\n # evaluates dimension of the optimal problem\n assert ( len(lower)==len(upper) ), \\\n \"'lower' and 'upper' must be of the same dimension.\"\n self.dim = len(lower)\n\n # initialises a forest of trees\n self.population = []\n for _ in range(init_numb_trees):\n tree = Tree(lower, upper)\n if (fun != None):\n self.population.append((fun(tree.vector), tree))\n else:\n self.population.append((sys.float_info.max, tree))\n\n # initialises iterations counter\n self.iteration = 1\n\n # creates a seedlings buffer\n self.seedlings = []", "def __init__(self, **kwargs):\n\n # call base class constructor registering that this tool performs everything.\n Algorithm.__init__(\n self,\n performs_projection = True,\n use_projected_features_for_enrollment = True,\n requires_enroller_training = True\n )", "def __init__(self, average_disparity, frame_down_factor, mem_down_factor,\n fovea_shape, frame_shape, values,\n verbose=False, memory_length=1, max_n_foveas=1, **bp_args):\n self.verbose = verbose\n self.use_uncertainty = False\n self.n_past_fovea = 0\n\n# self.frame_down_factor = frame_down_factor\n self.mem_down_factor = mem_down_factor\n self.frame_step = 2**frame_down_factor\n self.mem_step = 2**mem_down_factor #step size for uncertainty and importance calculations (pixels)\n\n self.average_disparity = downsample(\n average_disparity, down_factor=mem_down_factor)\n self.frame_shape = frame_shape\n self.fovea_shape = fovea_shape\n self.memory_shape = self.average_disparity.shape\n\n self.values = values\n self.max_n_foveas = max_n_foveas\n\n # self.params = {\n # 'data_weight': 0.16145115747533928, 'disc_max': 294.1504935618425,\n # 'data_max': 32.024780646200725, 'laplacian_ksize': 3} # original hyperopt\n # self.params = {\n # 'data_weight': 0.15109941436798274, 'disc_max': 44.43671813879002,\n # 'data_max': 68.407170602610137, 'laplacian_ksize': 5} # hyperopt on 100 images\n # self.params = {\n # 'data_weight': 0.2715404479972163, 'disc_max': 2.603682635476145,\n # 'data_max': 156312.43116792402, 'laplacian_ksize': 3} # Bryan's hyperopt on 250 images\n # self.params = {\n # 'data_weight': 1.2, 'disc_max': 924.0,\n # 'data_max': 189.0, 'laplacian_ksize': 5} # random\n # self.params = {\n # 'data_weight': 0.16145115747533928, 'disc_max': 294.1504935618425,\n # 'data_max': 32.024780646200725, 'laplacian_ksize': 3} # coarse\n self.params = {\n 'data_exp': 1.09821084614, 'data_max': 112.191597317,\n 'data_weight': 0.0139569211273, 'disc_max': 12.1301410452,\n 'laplacian_ksize': 3, 'smooth': 1.84510833504e-07}\n # self.params = {\n # 'data_exp': 14.2348581842, 'data_max': 79101007093.4,\n # 'data_weight': 
0.000102496570364, 'disc_max': 4.93508276126,\n # 'laplacian_ksize': 5, 'laplacian_scale': 0.38937704644,\n # 'smooth': 0.00146126755993} # optimized for frame_down: 1, mem_down: 2, fovea_levels: 1\n\n self.params.update(bp_args)\n\n self.disparity_memory = DisparityMemory(self.memory_shape, n=memory_length)\n self.uncertainty_memory = DisparityMemory(self.memory_shape, n=memory_length)\n self.fovea_memory = DisparityMemory(frame_shape, fovea_shape=fovea_shape, n=self.n_past_fovea)\n\n self._uc = UnusuallyClose(self.average_disparity)", "def __init__(self, epsilon=0.05,gamma=0.6, alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n F1QLearningAgent.__init__(self, **args)", "def optg(self,maxIters=900):\n mu = self.clone()\n optimizer = {'uff': AllChem.UFFOptimizeMolecule, \\\n 'mmff94':AllChem.MMFFOptimizeMolecule }[ self.forcefield.lower() ]\n if optimizer(mu, maxIters=maxIters, ignoreInterfragInteractions=False):\n print('FF OPTG failed')\n #c = mu.GetConformer()\n #if self.forcefield in ['mmff94',]:\n # mp = AllChem.MMFFGetMoleculeProperties(mu)\n # ff = AllChem.MMFFGetMoleculeForceField(mu, mp, \\\n # ignoreInterfragInteractions=False)\n #else:\n # ff = AllChem.UFFGetMoleculeForceField(mu, \\\n # ignoreInterfragInteractions=False)\n #ff.Minimize(maxIts=maxIters)\n coords_u = get_coords( mu )\n self.coords = coords_u\n self.update_coords(coords_u)\n #self.energy = ff.CalcEnergy()\n self.m = mu\n self.atoms = cc.atoms(self.zs, coords_u)\n self.iFFOpt = True\n self._ds = ssd.squareform( ssd.pdist(self.coords) )\n\n obj = NBody(mu, wH=F, key='ia')\n dangs = obj.dangs\n #angs = obj.angs\n iokg = True\n #if not hasattr(self, 'dangs0'):\n # raise Exception('you need to call optg_c() first??')\n for k in dangs:\n if abs(self.dangs0[k] - dangs[k]) > 60.:\n iokg = False\n break\n self.iokg = iokg", "def __init__(self,dna_size,pop_size,cross_rate,mutate_rate,eta_c,eta_m,mp_size,elitism,generations):\n self.dna_size=dna_size\n self.pop_size=pop_size\n self.cross_rate=cross_rate\n self.mutate_rate=mutate_rate\n self.eta_c=eta_c\n self.eta_m=eta_m\n self.mp_size=mp_size\n self.elitism=elitism\n self.generations=generations\n\n self.pop=None\n self.fitness=None\n self.fitness_rank=None\n self.mp=None\n self.offspring=None\n\n self.start_state=True\n self.iters=0", "def __init__(self, \n num_vars, \n num_hidden,\n training_inputs = None,\n algorithm = None,\n algorithm_dict = None,\n batch_size = None,\n use_momentum = None,\n W0= None, \n b0= None, \n bhid0 = None,\n zero_diag = True,\n symmetric = True,\n report_p_tilda =False,\n learn_biases = True,\n test_mode= False,\n training = True):\n \n self.num_vars = num_vars\n \n self.num_hidden = num_hidden\n \n self.batch_size = batch_size\n \n self.zero_diag = zero_diag\n \n self.algorithm = algorithm\n \n self.num_samples = 0\n \n self.num_u_gibbs = 0\n \n self.gibbs_steps = 0\n \n self.resample = False\n \n self.uniform = False\n \n self.mixture = False\n \n self.mix_params = []\n \n self.m_params = []\n \n self.mf_steps = 0\n \n self.alpha = 0\n \n self.learn_biases = learn_biases\n \n if isinstance(algorithm_dict, dict):\n \n for param in algorithm_dict.keys():\n \n if param == 'resample':\n \n self.resample = algorithm_dict[param]\n \n if param == 'mf_steps':\n \n self.mf_steps = algorithm_dict[param]\n \n if param == \"gibbs_steps\":\n \n self.gibbs_steps = algorithm_dict[param]\n \n if param == 
\"num_samples\":\n \n self.num_samples = algorithm_dict[param]\n \n if param == \"num_u_gibbs\":\n \n self.num_u_gibbs = algorithm_dict[param]\n \n if param == \"uniform\":\n \n self.uniform = algorithm_dict[param] \n \n if param == \"mixture\":\n \n self.mixture = algorithm_dict[param] \n \n if param == \"mix_params\":\n \n self.mix_params = algorithm_dict[param] \n \n if param == \"alpha\" and algorithm_dict[param] != None:\n #### alpha defines transition rate from\n #### uniform to mean-field distribution\n self.alpha = algorithm_dict[param] \n \n self.m_params = (1-self.alpha)*0.5*np.ones([1,self.num_vars])+\\\n self.alpha*np.mean(training_inputs,0)\n \n self.use_momentum = use_momentum\n \n self.report_p_tilda = report_p_tilda\n \n self.side = int(np.sqrt(self.num_vars))\n \n self.np_rand_gen = np.random.RandomState(1234)\n \n self.theano_rand_gen =\\\n theano.sandbox.rng_mrg.MRG_RandomStreams(self.np_rand_gen.randint(2**30))\n \n #self.theano_rand_gen =\\\n #T.shared_randomstreams.RandomStreams(self.np_rand_gen.randint(2**30))\n \n theano.config.exception_verbosity = 'high'\n \n self.node_indices = \\\n theano.shared(np.arange(self.num_vars), name=\"node_indices\")\n \n self.x = T.matrix('x')\n \n self.x_tilda = T.matrix('x_tilda')\n \n self.sampler_theta = T.matrix('sampler_theta')\n \n self.symmetric = symmetric\n \n if training:\n \n if self.num_hidden ==0:\n \n self.num_x2 = self.num_vars\n \n elif self.num_hidden > 0 :\n \n self.num_x2 = self.num_hidden\n \n self.updates = OrderedDict()\n \n self.N_train = training_inputs.shape[0]\n \n self.train_inputs = theano.shared(np.asarray(training_inputs,\n dtype=theano.config.floatX),\n borrow= True)\n \n self.learning_rate = T.dscalar('learning_rate')\n \n if self.mixture:\n \n print(\"Importance distribution was specified as mixture\"+\\\n \" of Bernoulli products\")\n \n if self.mix_params == []:\n print(\"Error: parameters defining mixture means were\"+\\\n \" not provided\")\n sys.exit()\n \n self.set_mixture_means(inputs = training_inputs)\n \n if use_momentum:\n \n print(\"Will add momentum term to gradient computations\")\n \n self.momentum = T.dscalar('learning_rate')\n \n self.grad_vec = {}\n \n self.grad_vec['W'] = theano.shared(np.zeros([self.num_vars, self.num_x2],\n dtype = theano.config.floatX), name = 'W_momentum', borrow = True)\n \n if self.num_hidden > 0:\n \n self.grad_vec['bhid'] = theano.shared(np.zeros([self.num_x2],\n dtype = theano.config.floatX), name = 'b_momentum', borrow = True)\n \n self.grad_vec['b'] = theano.shared(np.zeros([self.num_vars],\n dtype = theano.config.floatX), name = 'b_momentum', borrow = True)\n \n if test_mode:\n \n b_init =self.np_rand_gen.uniform(0,1, num_vars)\n \n W_init =self.np_rand_gen.uniform(0,1, size = (num_vars, num_vars))\n \n # also tested ones\n # b_init = np.ones(num_vars)\n \n # W_init = np.ones([num_vars, num_vars])\n \n self.b_init= np.asarray(b_init, dtype = theano.config.floatX)\n \n self.W_init= np.asarray(W_init, dtype = theano.config.floatX)\n \n self.b = theano.shared(self.b_init, name='b', borrow = False)\n \n self.W = theano.shared(self.W_init, name='W', borrow = False)\n \n print(\"Initialized with test mode\")\n \n else:\n \n if W0 is None:\n \n if self.num_hidden > 0:\n \n W0_init =\\\n self.np_rand_gen.uniform(\n -4*np.sqrt(6.0/(self.num_vars+self.num_hidden)),\\\n 4*np.sqrt(6.0 /(self.num_vars + self.num_hidden)), \n size = (num_vars, self.num_hidden)\n )\n \n W0 = np.asarray(W0_init, dtype = theano.config.floatX) \n \n if self.num_hidden == 0:\n \n # 
different W initializations: \n \n # W0_init =\\\n # self.np_rand_gen.uniform(-np.sqrt(3.0/(num_vars)),\\\n # np.sqrt(3.0 / (num_vars)), size = (num_vars, num_vars))\n \n # W0_init =\\\n # self.np_rand_gen.uniform(-0.00000001,\\\n # 0.00000001, size = (num_vars, num_vars))\n \n W0_init = 0.00000001*\\\n self.np_rand_gen.normal(size = (num_vars, self.num_x2)) \n \n W0 = np.asarray(W0_init, dtype = theano.config.floatX)\n \n if self.symmetric:\n \n W0 = (W0 + np.transpose(W0))/2.0\n \n if self.zero_diag:\n \n W0 = W0 - np.diag(np.diag(W0))\n \n self.W = theano.shared(value= W0, name='W', borrow=True)\n \n if self.num_hidden == 0:\n \n test_W = self.W.get_value() \n \n assert sum(np.diag(test_W)) == 0.0\n \n assert (test_W == np.transpose(test_W)).all() == True\n \n else:\n print(\"W is initialized with provided array\")\n self.W = theano.shared(value= W0, name='W', borrow=True)\n \n if b0 is None:\n \n bias_init = np.zeros(num_vars, dtype = theano.config.floatX)\n \n self.b = theano.shared(value= bias_init, name='b', borrow=True)\n \n else:\n print(\"b vector is initialized with provided vector\")\n self.b = theano.shared(value= b0, name='b', borrow=True)\n \n if bhid0 is None and self.num_hidden > 0:\n \n hbias_init = np.zeros(self.num_hidden, dtype = theano.config.floatX)\n \n self.bhid = theano.shared(value= hbias_init, name='bhid', borrow=True)\n \n elif (bhid0 is not None) and (self.num_hidden > 0):\n print(\"bhid vector is initialized with provided vector\") \n self.bhid = theano.shared(value= bhid0, name='bhid', borrow=True)\n \n self.theta = [self.W, self.b]\n \n if self.num_hidden > 0 :\n \n self.theta.append(self.bhid)\n \n self.train_set = set(range(self.N_train))\n \n self.minibatch_set = T.ivector('minibatch_set')\n \n self.sample_set = T.ivector('sample_set')\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n self.x_gibbs= theano.shared(np.ones([self.batch_size,self.num_vars],\n dtype=theano.config.floatX),\n borrow = True, name= \"x_gibbs\")\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n self.persistent_gibbs =\\\n theano.shared(np.ones([self.batch_size,self.num_hidden],\n dtype=theano.config.floatX),\n borrow = True, \n name= \"persistent_gibbs\")\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0:\n \n init_mf_vis = self.np_rand_gen.uniform(0, \n 1, \n size =(self.num_vars,1))\n \n init_mf_vis = np.asarray(init_mf_vis, dtype = theano.config.floatX)\n \n self.mf_vis_p = theano.shared(init_mf_vis, \n name= \"mf_vis_p\", \n borrow= True)\n \n if self.num_hidden > 0:\n \n init_mf_hid = \\\n self.np_rand_gen.uniform(0, 1, size =(self.num_hidden,1))\n \n init_mf_hid = np.asarray(init_mf_hid, \n dtype = theano.config.floatX)\n \n self.mf_hid_p = theano.shared(init_mf_hid, \n name= \"mf_hid_p\", \n borrow= True)\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0: \n \n if self.num_hidden ==0: \n self.x_gibbs= theano.shared(np.ones([self.batch_size,self.num_vars],\n dtype=theano.config.floatX),\n borrow = True, name= \"x_gibbs\")", "def train_hyperopt(params):\n lasagne.random.set_rng(RandomState(9859295))\n\n template_name = params.pop('template_name') \n params = adjust_params_for_hyperopt(params)\n \n config_strings = create_config_strings(template_name)\n config_objects = create_config_objects(config_strings)\n templates, _ = create_templates_variants_from_config_objects(\n config_objects)\n \n \n processed_templates, params_without_template_params = process_templates(\n templates, params)\n final_params = 
process_parameters_by_templates(params_without_template_params, \n processed_templates)\n \n # go to directory above this source-file\n main_template_filename = os.path.dirname(os.path.abspath(os.path.dirname(\n __file__)))\n # then complete path to config\n main_template_filename = os.path.join(main_template_filename, \"configs\", \n \"eegnet_template.yaml\")\n \n with (open(main_template_filename, 'r')) as main_template_file:\n main_template_str = main_template_file.read()\n \n \n final_params['original_params'] = 'dummy'\n train_str = Template(main_template_str).substitute(final_params)\n \n def do_not_load_constructor(loader, node):\n return None\n yaml.add_constructor(u'!DoNotLoad', do_not_load_constructor)\n modified_train_str = train_str.replace('layers: ', 'layers: !DoNotLoad ')\n train_dict = yaml_parse.load(modified_train_str) \n dataset = train_dict['dataset'] \n dataset.load()\n dataset_provider = train_dict['dataset_provider']\n \n assert 'in_sensors' in train_str\n assert 'in_rows' in train_str\n assert 'in_cols' in train_str\n \n train_str = train_str.replace('in_sensors',\n str(dataset.get_topological_view().shape[1]))\n train_str = train_str.replace('in_rows',\n str(dataset.get_topological_view().shape[2]))\n train_str = train_str.replace('in_cols', \n str(dataset.get_topological_view().shape[3]))\n \n train_dict = yaml_parse.load(train_str)\n layers = train_dict['layers']\n final_layer = layers[-1]\n\n # turn off debug/info logging\n logging.getLogger(\"pylearn2\").setLevel(logging.WARN)\n logging.getLogger(\"braindecode\").setLevel(logging.WARN)\n exp = Experiment()\n exp.setup(final_layer, dataset_provider, **train_dict['exp_args'])\n exp.run()\n final_misclass = exp.monitor_chans['test_misclass'][-1]\n print(\"Result for\")\n pprint(params)\n print(\"Final Test misclass: {:5.4f}\".format(float(final_misclass)))\n return final_misclass", "def __init__(self, algorithm, iters, **params):\n self.algorithm = algorithm\n self.bags = iters\n self.params = params", "def __init__(self):\n Algorithm.__init__(self)\n self.name = \"Otsus Threshold\"\n self.parent = \"Segmentation\"", "def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n iterInput = '5.10.20' \r\n xdim = 10\r\n ydim = 10\r\n dist = 2\r\n wild = 0.75\r\n prune = 1\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n GALEConstants.setConstants(prune, wild)\r\n e = GALE_Environment(trainData,testData,bitLength)\r\n sampleSize = e.getNrSamples()\r\n gale = GALE(e, outProg, outPop, bitLength, CVpartitions, graphPerformance, xdim, ydim, dist)\r\n \r\n #Set some GALE parameters.\r\n if trackCycles == 'Default':\r\n gale.setTrackingIterations(sampleSize)\r\n else:\r\n gale.setTrackingIterations(trackCycles) \r\n gale.setNumberOfTrials(lastIter, iterList) \r\n \r\n #Run the GALE Algorithm \r\n gale.runGALE()", "def create_tuning_functions(self):\r\n\t\tmotion_tuning = 
np.zeros((par['num_motion_tuned'], par['num_receptive_fields'], par['num_motion_dirs']), dtype=np.float32)\r\n\t\tfix_tuning = np.zeros((par['num_fix_tuned'], par['num_receptive_fields']), dtype=np.float32)\r\n\t\trule_tuning = np.zeros((par['num_rule_tuned'], par['num_rules']), dtype=np.float32)\r\n\r\n\t\t# generate list of prefered directions\r\n\t\t# dividing neurons by 2 since two equal groups representing two modalities\r\n\t\tpref_dirs = np.arange(0,360,360/(par['num_motion_tuned']//par['num_receptive_fields'])).astype(np.float32)\r\n\r\n\t\t# generate list of possible stimulus directions\r\n\t\tstim_dirs = np.arange(0,360,360/par['num_motion_dirs']).astype(np.float32)\r\n\r\n\t\tfor n in range(par['num_motion_tuned']//par['num_receptive_fields']):\r\n\t\t\tfor i in range(len(stim_dirs)):\r\n\t\t\t\tfor r in range(par['num_receptive_fields']):\r\n\t\t\t\t\td = np.cos((stim_dirs[i] - pref_dirs[n])/180*np.pi)\r\n\t\t\t\t\tn_ind = n+r*par['num_motion_tuned']//par['num_receptive_fields']\r\n\t\t\t\t\tmotion_tuning[n_ind,r,i] = par['tuning_height']*np.exp(par['kappa']*d)/np.exp(par['kappa'])\r\n\r\n\t\tfor n in range(par['num_fix_tuned']):\r\n\t\t\tfor i in range(par['num_receptive_fields']):\r\n\t\t\t\tif n%par['num_receptive_fields'] == i:\r\n\t\t\t\t\tfix_tuning[n,i] = par['tuning_height']\r\n\r\n\t\tneurons_per_rule = par['num_rule_tuned']//par['num_rules']\r\n\t\tfor n in range(par['num_rule_tuned']):\r\n\t\t\tfor i in range(par['num_rules']):\r\n\t\t\t\tif n in range(i*neurons_per_rule, (i+1)*neurons_per_rule):\r\n\t\t\t\t\trule_tuning[n,i] = par['tuning_height']\r\n\r\n\r\n\t\treturn motion_tuning, fix_tuning, rule_tuning", "def run(self):\n if self.pp['net']:\n space = {\n # Qlearnnet\n 'net_lr': hp.loguniform('net_lr', np.log(5e-7), np.log(1e-4)),\n 'net_lr_decay': hp.loguniform('net_lr_decay', np.log(0.90), np.log(0.99)),\n # Singh\n # 'net_lr': hp.loguniform('net_lr', np.log(1e-7), np.log(5e-4)),\n 'beta': hp.uniform('beta', 16, 30),\n # Double\n 'net_copy_iter': hp.loguniform('net_copy_iter', np.log(5), np.log(150)),\n 'net_creep_tau': hp.loguniform('net_creep_tau', np.log(0.01),\n np.log(0.7)),\n # Exp. 
replay\n 'batch_size': scope.int(hp.uniform('batch_size', 8, 16)),\n 'buffer_size': scope.int(hp.uniform('buffer_size', 2000, 10000)),\n # N-step\n 'n_step': scope.int(hp.uniform('n_step', 3, 40)),\n # Policy\n 'vf_coeff': hp.uniform('vf_coeff', 0.005, 0.5),\n 'entropy_coeff': hp.uniform('entropy_coeff', 1.0, 100.0)\n }\n else:\n space = {\n 'beta': hp.uniform('beta', 7, 23),\n 'alpha': hp.uniform('alpha', 0.0001, 0.4),\n 'alpha_decay': hp.uniform('alpha_decay', 0.9999, 0.9999999),\n 'epsilon': hp.loguniform('epsilon', np.log(0.2), np.log(0.8)),\n 'epsilon_decay': hp.uniform('epsilon_decay', 0.9995, 0.9999999),\n 'gamma': hp.uniform('gamma', 0.7, 0.90),\n 'lambda': hp.uniform('lambda', 0.0, 1.0)\n }\n # Only optimize parameters specified in args\n space = {param: space[param] for param in self.pp['hopt']}\n if self.pp['hopt_fname'].startswith('mongo:'):\n self._hopt_mongo(space)\n else:\n self._hopt_pickle(space)", "def __init__(self, p, hyperpara, para, inst_name=\"\"):\n \n self.hyperpara = hyperpara\n self.para = para\n \n s = -np.log(-np.log(1.0 - p))\n \n k, cop = hyperpara\n \n # Sets colour\n colour = [\n None,\n None,\n {\n \"i\": 2,\n \"me\": 3},\n {\n \"i\": 0,\n \"me\": 1}][k][cop]\n \n if cop == \"i\":\n if k == 3:\n def f(X):\n q1, q2, q3 = X\n \n Y = np.array([q1, q2 - q1, q3 - q2])\n \n if np.any(Y <= 0.0):\n return None\n \n return -0.5 * np.sum(((Y - para[:, 0]) / para[:, 1]) ** 2)\n elif k == 2:\n def f(X):\n sigma, q1, q2 = X\n \n if sigma <= 0:\n return None\n \n Y = np.array([q1, q2 - q1])\n \n if np.any(Y <= 0.0):\n return None\n \n a = -0.5 * np.sum(((Y - para[:, 0]) / para[:, 1]) ** 2)\n \n return a - np.log(sigma)\n elif cop == \"me\":\n q_marg = [None for _ in range(k)]\n \n for i in range(k):\n dist = Normal(para[i, 0], para[i, 1])\n q_marg[i] = TruncatedDistribution(\n dist,\n 0.0,\n TruncatedDistribution.LOWER)\n \n ot = MaximumEntropyOrderStatisticsDistribution(q_marg)\n \n if k == 3:\n def f(X):\n q1, q2, q3 = X\n \n if np.any(np.array([q1, q2 - q1, q3 - q2]) <= 0.0):\n return None\n \n Y = ot.computePDF(X)\n \n if Y <= 0:\n return None\n \n return np.log(Y)\n elif k == 2:\n def f(X):\n sigma, q1, q2 = X\n \n if sigma <= 0 or q1 <= 0.0 or q2 <= q1:\n return None\n \n Y = ot.computePDF([q1, q2])\n \n if Y <= 0:\n return None\n \n return np.log(Y) - np.log(sigma)\n \n if k == 3:\n # Transformation (mu, theta, xi) -> (q1, q2, q3)\n def g(X):\n mu, sigma, xi = X\n \n if sigma <= 0:\n return None\n \n # When xi is close enough to 0, we consider it equal to 0\n if abs(xi) < 1e-300:\n q = mu + sigma * s\n else:\n q = mu + sigma * (np.exp(xi * s) - 1.0) / xi\n \n if q[0] < 0.0:\n return None\n return q\n \n \n # Log of determinant of g\n def g_det(X):\n mu, sigma, xi = X\n \n if abs(xi) < 1e-300:\n return np.log(sigma)\n \n e = np.exp(s * xi)\n \n sm = [\n s[i] * e[i] * (e[(i + 2) % 3] - e[(i + 1) % 3])\n for i in range(3)]\n \n return np.log(sigma) + np.log(sum(sm)) - np.log(xi ** 2.0)\n elif k == 2:\n # Transformation (mu, sigma, xi) -> (sigma, q1, q2)\n def g(X):\n mu, sigma, xi = X\n \n # When xi is close enough to 0, we consider it equal to 0\n if abs(xi) < 1e-300:\n q = mu + sigma * s\n else:\n q = mu + sigma * (np.exp(xi * s) - 1.0) / xi\n \n if q[0] < 0.0:\n return None\n \n return np.concatenate(([sigma], q))\n \n \n # Log of determinant of g\n def g_det(X):\n mu, sigma, xi = X\n \n if abs(xi) < 1e-300:\n return np.log(sigma)\n \n e = (s * xi - 1.0) * np.exp(s * xi)\n \n f = np.log(abs(e[0] - e[1]))\n \n return np.log(sigma) + f - np.log(xi ** 2.0)\n \n 
super().__init__(\n util.log_transform(f, g, g_det),\n colour=colour,\n inst_name=inst_name)\n \n if k == 2:\n self.prior[\"proper\"] = False", "def main(ngrains=100,sigma=15.,c2a=1.6235,mu=0.,\n prc='cst',isc=False,tilt_1=0.,\n tilts_about_ax1=0.,tilts_about_ax2=0.):\n if isc:\n h = mmm()\n else:\n h=np.array([np.identity(3)])\n gr = []\n for i in range(ngrains):\n dth = random.uniform(-180., 180.)\n if prc=='cst': g = gen_gr_fiber(th=dth,sigma=sigma,mu=mu,tilt=tilt_1,iopt=0) # Basal//ND\n elif prc=='ext': g = gen_gr_fiber(th=dth,sigma=sigma,mu=mu,tilt=tilt_1,iopt=1) # Basal//ED\n else:\n raise IOError('Unexpected option')\n for j in range(len(h)):\n temp = np.dot(g,h[j].T)\n\n ## tilts_about_ax1\n if abs(tilts_about_ax1)>0:\n g_tilt = rd_rot(tilts_about_ax1)\n temp = np.dot(temp,g_tilt.T)\n ## tilts_about_ax2?\n elif abs(tilts_about_ax2)>0:\n g_tilt = td_rot(tilts_about_ax2)\n temp = np.dot(temp,g_tilt.T)\n elif abs(tilts_about_ax2)>0 and abs(tilts_about_ax2)>0:\n raise IOError('One tilt at a time is allowed.')\n\n phi1,phi,phi2 = euler(a=temp, echo=False)\n gr.append([phi1,phi,phi2,1./ngrains])\n\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,2],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n return np.array(gr)", "def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof", "def __init__(self, state_size, action_size, args,\n agent_count = 1,\n l2_decay = 0.0001):\n\n self.framework = \"D4PG\"\n self.device = args.device\n self.eval = args.eval\n\n self.actor_learn_rate = args.actor_learn_rate\n self.critic_learn_rate = args.critic_learn_rate\n self.gamma = args.gamma\n self.rollout = args.rollout\n self.num_atoms = args.num_atoms\n self.vmin = args.vmin\n self.vmax = args.vmax\n self.atoms = torch.linspace(self.vmin,\n self.vmax,\n 
self.num_atoms).to(self.device)\n self.atoms = self.atoms.unsqueeze(0)\n\n # Initialize ACTOR networks #\n self.actor = ActorNet(args.layer_sizes,\n state_size,\n action_size).to(self.device)\n self.actor_target = ActorNet(args.layer_sizes,\n state_size,\n action_size).to(self.device)\n self.actor_optim = optim.Adam(self.actor.parameters(),\n lr=self.actor_learn_rate,\n weight_decay=l2_decay)\n\n # Initialize CRITIC networks #\n c_input_size = state_size * agent_count\n c_action_size = action_size * agent_count\n self.critic = CriticNet(args.layer_sizes,\n c_input_size,\n c_action_size,\n self.num_atoms).to(self.device)\n self.critic_target = CriticNet(args.layer_sizes,\n c_input_size,\n c_action_size,\n self.num_atoms).to(self.device)\n self.critic_optim = optim.Adam(self.critic.parameters(),\n lr=self.critic_learn_rate,\n weight_decay=l2_decay)", "def initialize(N0,L,Nt,pflag):\n qmax,qnet,enet = net.generate(N0,L,Nt)\n N = (N0+Nt)\n #generate initial conditions. all nodes have S,E,C=(1,0,0)\n #except the infected node which has S,E,C=(0.1,0.05,0.05)\n init = np.zeros(3*N)\n for i in range(N):\n init[i] = 1\n #the highest degree node is infected\n infnode = qnet.argmax(axis=0)+1\n init[infnode-1] = 0.1\n init[N+infnode-1] = 0.05\n init[2*N+infnode-1] = 0.05\n if (pflag==True):\n #compute the transport matrix from the degree vector\n #and the adjacency matrix\n A = net.adjacency_matrix(N,enet)\n P = np.zeros([N,N])\n for j in range(N):\n for i in range(N):\n P[i,j] = qnet[i]*A[i,j]\n P[:,j] = P[:,j]/sum(P[:,j])\n return init,infnode,P\n return init,infnode", "def main(**args):\n env = gym.make(args.pop('env'))\n\n ac_args = {'hidden_size': [64, 64], 'size': 2}\n\n # Discriminator approximators\n disc_args = {\n 'g_args': {\n 'hidden_layers': [32, 1],\n 'size': 1,\n 'activation': nn.Identity\n },\n 'h_args': {\n 'hidden_layers': [32, 32, 1],\n 'size': 2,\n 'activation': nn.LeakyReLU\n }\n }\n\n ac_args.update(**disc_args)\n\n train_args = {\n 'pi_train_n_iters': 80,\n 'disc_train_n_iters': 40,\n 'max_kl': args.pop('target_kl') or 1.,\n 'kl_start': 20,\n 'entropy_reg': .1,\n 'clip_ratio': .2,\n 'max_eps_len': 150,\n 'real_label': 1,\n 'pi_label': 0\n }\n agent_args = {\n 'n_epochs': args.pop('epochs') or 250,\n 'env_name': '', # 'b_10000_plr_.1e-4',\n 'steps_per_epoch': 10000\n }\n\n all_args = {\n 'ac_args': ac_args,\n 'pi_lr': 2e-4,\n 'disc_lr': 1e-4,\n 'gamma': .99,\n 'buffer_size': int(1e6),\n **agent_args,\n **train_args,\n **{k: v\n for k, v in args.items() if v}\n }\n\n airl(env, **all_args)", "def __init__(self, random_org_parameters, composition_space, constraints):\n\n self.name = 'random organism creator'\n\n # defaults\n #\n # number of random organisms to make (only used for epa searches)\n self.default_number = 28\n # max number of atoms\n if composition_space.objective_function == 'epa':\n # make sure we can sample cells with two formula units\n target_number = constraints.min_num_atoms + 6\n num_formulas = target_number/composition_space.endpoints[\n 0].num_atoms\n if num_formulas < 2:\n min_of_max = int(2*composition_space.endpoints[0].num_atoms)\n else:\n min_of_max = int(round(\n num_formulas)*composition_space.endpoints[0].num_atoms)\n else:\n min_of_max = constraints.min_num_atoms + 6\n self.default_max_num_atoms = min(min_of_max, constraints.max_num_atoms)\n # allow structure with compositions at the endpoints (for pd searches)\n self.default_allow_endpoints = True\n # volume scaling behavior\n # default volumes per atom of elemental ground state structures\n # 
computed from structures on materials project (materialsproject.org)\n self.all_default_vpas = {'H': 13.89, 'He': 15.79, 'Li': 20.12,\n 'Be': 7.94, 'B': 7.25, 'C': 10.58,\n 'N': 42.73, 'O': 13.46, 'F': 16.00,\n 'Ne': 19.93, 'Na': 37.12, 'Mg': 23.04,\n 'Al': 16.47, 'Si': 20.44, 'P': 23.93,\n 'S': 36.03, 'Cl': 34.90, 'Ar': 44.87,\n 'K': 73.51, 'Ca': 42.42, 'Sc': 24.64,\n 'Ti': 17.11, 'V': 13.41, 'Cr': 11.57,\n 'Mn': 11.04, 'Fe': 11.55, 'Co': 10.92,\n 'Ni': 10.79, 'Cu': 11.82, 'Zn': 15.56,\n 'Ga': 20.34, 'Ge': 23.92, 'As': 22.45,\n 'Se': 38.13, 'Br': 37.53, 'Kr': 65.09,\n 'Rb': 90.44, 'Sr': 54.88, 'Y': 32.85,\n 'Zr': 23.50, 'Nb': 18.31, 'Mo': 15.89,\n 'Tc': 14.59, 'Ru': 13.94, 'Rh': 14.25,\n 'Pd': 15.45, 'Ag': 18.00, 'Cd': 23.28,\n 'In': 27.56, 'Sn': 36.70, 'Sb': 31.78,\n 'Te': 35.03, 'I': 50.34, 'Xe': 83.51,\n 'Cs': 116.17, 'Ba': 63.64, 'Hf': 22.50,\n 'Ta': 18.25, 'W': 16.19, 'Re': 15.06,\n 'Os': 14.36, 'Ir': 14.55, 'Pt': 15.72,\n 'Au': 18.14, 'Hg': 31.45, 'Tl': 31.13,\n 'Pb': 32.30, 'Bi': 36.60, 'La': 37.15,\n 'Ce': 26.30, 'Pr': 36.47, 'Nd': 35.44,\n 'Pm': 34.58, 'Sm': 33.88, 'Eu': 46.28,\n 'Gd': 33.33, 'Tb': 32.09, 'Dy': 31.57,\n 'Ho': 31.45, 'Er': 30.90, 'Tm': 30.30,\n 'Yb': 40.45, 'Lu': 29.43, 'Ac': 45.52,\n 'Th': 32.03, 'Pa': 25.21, 'U': 19.98,\n 'Np': 18.43, 'Pu': 18.34}\n\n self.default_vpas = self.get_default_vpas(composition_space)\n\n # set to defaults\n if random_org_parameters in (None, 'default'):\n self.number = self.default_number\n self.max_num_atoms = self.default_max_num_atoms\n self.allow_endpoints = self.default_allow_endpoints\n self.vpas = self.default_vpas\n # parse the parameters and set to defaults if necessary\n else:\n # the number to make\n if 'number' not in random_org_parameters:\n self.number = self.default_number\n elif random_org_parameters['number'] in (None, 'default'):\n self.number = self.default_number\n else:\n self.number = random_org_parameters['number']\n\n # the max number of atoms\n if 'max_num_atoms' not in random_org_parameters:\n self.max_num_atoms = self.default_max_num_atoms\n elif random_org_parameters['max_num_atoms'] in (None, 'default'):\n self.max_num_atoms = self.default_max_num_atoms\n elif random_org_parameters['max_num_atoms'] > \\\n constraints.max_num_atoms:\n print('The value passed to the \"max_num_atoms\" keyword in the '\n 'InitialPopulation block may not exceed the value passed'\n ' to the \"max_num_atoms\" keyword in the Constraints '\n 'block.')\n print('Quitting...')\n quit()\n elif random_org_parameters['max_num_atoms'] < \\\n constraints.min_num_atoms:\n print('The value passed to the \"max_num_atoms\" keyword in the '\n 'InitialPopulation block may not be smaller than the '\n 'value passed to the \"min_num_atoms\" keyword in the '\n 'Constraints block.')\n print('Quitting...')\n quit()\n else:\n self.max_num_atoms = random_org_parameters['max_num_atoms']\n\n # allowing composition space endpoints (only used for pd searches)\n if 'allow_endpoints' not in random_org_parameters:\n self.allow_endpoints = self.default_allow_endpoints\n elif random_org_parameters['allow_endpoints'] in (None, 'default'):\n self.allow_endpoints = self.default_allow_endpoints\n else:\n self.allow_endpoints = random_org_parameters['allow_endpoints']\n\n # volume scaling\n self.vpas = self.default_vpas\n if 'volumes_per_atom' not in random_org_parameters:\n pass\n elif random_org_parameters['volumes_per_atom'] in (None,\n 'default'):\n pass\n else:\n # replace the specified volumes per atom with the given values\n for symbol in 
random_org_parameters['volumes_per_atom']:\n self.vpas[symbol] = random_org_parameters[\n 'volumes_per_atom'][symbol]\n\n self.num_made = 0 # number added to initial population\n self.is_successes_based = True # it's based on number added\n self.is_finished = False", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n ['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. 
Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)", "def main():\n create_sets()\n optimal_weights = genetic_algorithm()\n obtain_best_model(optimal_weights)", "def __init__(self, args, number_of_labels, number_of_features,adj):\n super(SpGAT, self).__init__()\n self.args=args\n \n self.number_of_labels = number_of_labels\n self.number_of_features = number_of_features\n self.device = args.device\n self.adj= sparse_mx_to_torch_sparse_tensor(adj).to(self.device).to_dense()\n self.attentions = [SpGraphAttentionLayer(number_of_features, \n args.hidden, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=True) for _ in range(args.nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(args.hidden * args.nheads, \n args.Q, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=False)", "def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)", "def setup_method(self):\n self.x0 = (1.0, [1.0, 1.0])\n self.sol = (-0.195, np.array([-0.195, -0.1]))\n\n self.tol = 3 # number of decimal places\n\n self.niter = 100\n self.disp = False\n\n # fix random seed\n np.random.seed(1234)\n\n self.kwargs = {\"method\": \"L-BFGS-B\", \"jac\": True}\n self.kwargs_nograd = {\"method\": \"L-BFGS-B\"}", "def 
init_P_PHI_GIVEN_A():\n global P_PHI_GIVEN_A\n for k in TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP.keys(): # task level action\n P_PHI_GIVEN_A[k] = collections.OrderedDict()\n for u in INTERFACE_LEVEL_ACTIONS:\n if u == TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP[k]:\n # try to weight the true command more for realistic purposes. Can be offset by using a high PHI_GIVEN_A_NOISE\n P_PHI_GIVEN_A[k][u] = 1.0\n else:\n # P_PHI_GIVEN_A[k][u] = np.random.random()*PHI_GIVEN_A_NOISE #IF PHI_GIVEN_A_NOISE is 0, then the p(ui|a) is a deterministic mapping\n P_PHI_GIVEN_A[k][u] = 0.0\n\n delta_dist = np.array(P_PHI_GIVEN_A[k].values())\n uniform_dist = (1.0 / len(INTERFACE_LEVEL_ACTIONS)) * np.ones(len(INTERFACE_LEVEL_ACTIONS))\n blended_dist = (1 - PHI_GIVEN_A_NOISE) * delta_dist + PHI_GIVEN_A_NOISE * uniform_dist # np.array\n for index, u in enumerate(INTERFACE_LEVEL_ACTIONS):\n P_PHI_GIVEN_A[k][u] = blended_dist[index]", "def run_genetic_algorithm(bayes_params):\n\n print('Running genetic algorithm')\n\n # Unpacks parameters (unfortunately can't feed dataframe (or series or\n # array) data into a function with hyperopt, so am having to pickle the\n # parameters not being optimised with hyperopt\n params_file = '{}/Program_input/Input_params.pkl'.format(\n bayes_params['workingdirectory']\n )\n with open(params_file, 'rb') as f:\n fixed_params = pickle.load(f)\n if not type(fixed_params) in [dict, OrderedDict]:\n raise TypeError('Data in {} is not a pickled dictionary'.format(params_file))\n params = {**bayes_params, **fixed_params}\n\n # Records sequences and their fitnesses after each generation\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'w') as f:\n f.write('Tracking GA optimisation progress\\n')\n\n ga_calcs = run_ga_calcs(params)\n\n # Defines whether sequences are compared by their raw or rank propensities.\n # Since BUDE scores and frequency values have to be compared by their rank\n # values, have made the decision to also compare propensity values by their\n # rankings.\n \"\"\"\n if params['matingpopmethod'] in ['fittest', 'roulettewheel']:\n raw_or_rank = 'raw'\n elif params['matingpopmethod'] in ['rankroulettewheel']:\n raw_or_rank = 'rank'\n \"\"\"\n raw_or_rank = 'rank'\n\n # Calculates propensity and/or BUDE energy of input structure\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('Input structure\\n')\n\n if params['fitnessscoremethod'] == 'alternate':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, frequency_score,'\n ' BUDE energy, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency,\n params['inputpdbenergy'], params['inputpdbclash']\n ))\n f.write('\\n')\n\n if params['fitnessscoremethod'] == 'propensity':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, 
sequence, propensity_score, frequency_score\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n f.write('\\n')\n\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, BUDE energy\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n f.write('\\n')\n\n elif params['fitnessscoremethod'] == 'molprobity':\n network_clashes = ga_calcs.measure_fitness_clashscore(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clashscore = network_clashes[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, clashscore))\n f.write('\\n')\n\n # Runs GA cycles\n gen = params['startgen']\n while gen < params['stopgen']:\n gen += 1\n print('Generation {}'.format(gen))\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nGeneration {}\\n'.format(gen))\n\n\n all_networks_list = [params['sequencesdict']]\n pop_sizes = [params['populationsize']]\n\n for index, networks_dict in enumerate(all_networks_list):\n # Measures fitness of sequences in starting population.\n if (\n (params['fitnessscoremethod'] == 'propensity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 2 == 1)\n ):\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(networks_dict)\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, propensity, frequency, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency, probability\n ))\n f.write('Total: {}, {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n elif (\n (params['fitnessscoremethod'] == 'allatom')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 2)\n ):\n # Runs BUDE energy scoring on parallel processors\n network_energies = ga_calcs.measure_fitness_allatom(networks_dict)\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their\n # 
associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, BUDE score, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, energy, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_energies.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n elif (\n (params['fitnessscoremethod'] == 'molprobity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 0)\n ):\n # Runs MolProbity scoring on parallel processors\n network_clashes = ga_calcs.measure_fitness_clashscore(networks_dict)\n (network_fitness_scores\n ) = ga_calcs.convert_clashscores_to_probabilities(network_clashes)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, clashscore, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clash = network_clashes[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, clash, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_clashes.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n # Selects subpopulation for mating\n if params['matingpopmethod'] == 'fittest':\n mating_pop_dict = ga_calcs.create_mat_pop_fittest(\n networks_dict, network_fitness_scores, pop_sizes[index],\n params['unfitfraction']\n )\n elif params['matingpopmethod'] in ['roulettewheel', 'rankroulettewheel']:\n mating_pop_dict = ga_calcs.create_mat_pop_roulette_wheel(\n networks_dict, network_fitness_scores, pop_sizes[index], params['']\n )\n\n # Performs crossover of parent sequences to generate child sequences\n if params['crossovermethod'] == 'uniform':\n crossover_pop_dict = ga_calcs.uniform_crossover(mating_pop_dict)\n elif params['crossovermethod'] == 'segmented':\n crossover_pop_dict = ga_calcs.segmented_crossover(mating_pop_dict)\n\n # Mutates child sequences\n if params['mutationmethod'] == 'swap':\n mutated_pop_dict = ga_calcs.swap_mutate(crossover_pop_dict)\n elif params['mutationmethod'] == 'scramble':\n mutated_pop_dict = ga_calcs.scramble_mutate(crossover_pop_dict)\n\n # Combines parent and child sequences into single generation\n merged_networks_dict = ga_calcs.add_children_to_parents(\n mutated_pop_dict, mating_pop_dict\n )\n\n random_order = [n for n in range(len(merged_networks_dict))]\n random.shuffle(random_order)\n shuffled_merged_networks_dict = OrderedDict(\n {list(merged_networks_dict.keys())[n]:\n list(merged_networks_dict.values())[n] for n in random_order}\n )\n params['sequencesdict'] = shuffled_merged_networks_dict\n\n # Calculates fitness of output sequences and filters population to maintain\n # the fittest 50%, plus sums the probabilities of the retained sequences and\n # returns this value (to be minimised with hyperopt)\n summed_fitness = 0\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nOutput generation\\n')\n\n if params['fitnessscoremethod'] != 'allatom':\n (network_propensity_scores, 
network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['sequencesdict'])\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['sequencesdict'])\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their associated\n # fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n if params['fitnessscoremethod'] != 'allatom':\n f.write('network, sequence, propensity, frequency\\n')\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('network, sequence, BUDE score\\n')\n for network, G in params['sequencesdict'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n if params['fitnessscoremethod'] != 'allatom':\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n if params['fitnessscoremethod'] != 'allatom':\n f.write('Total: {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values())\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('Total: {}'.format(sum(network_energies.values())))\n f.write('\\n')\n\n params['sequencesdict'] = ga_calcs.create_mat_pop_fittest(\n params['sequencesdict'], network_fitness_scores,\n params['populationsize'], unfit_fraction=0\n )\n\n for network in params['sequencesdict'].keys():\n # Higher propensity is more likely, so add because output from\n # measure_fitness_propensity is sum of -log(propensity) values, and\n # hyperopt minimises output score\n # Can't combine propensity and frequency scores without first converting\n # to a probability, so for calculating output combined fitness can only\n # use combined propensity scores to rank the structures\n if params['fitnessscoremethod'] != 'allatom':\n summed_fitness += network_propensity_scores[network]\n # Lower score is more likely, so add because hyperopt minimises output\n # score\n elif params['fitnessscoremethod'] == 'allatom':\n summed_fitness += network_energies[network]\n\n with open('{}/Program_output/GA_output_sequences_dict.pkl'.format(\n bayes_params['workingdirectory']), 'wb') as f:\n pickle.dump(params['sequencesdict'], f)\n\n print(summed_fitness)\n\n return summed_fitness", "def __init__(self, params, diam=1, aggressive=True):\n defaults = dict(diam=diam, aggressive=aggressive)\n super(MirrorDescent, self).__init__(params, defaults)", "def __init__(self, population=25, initSampling='lhc', fracMutation=0.2, fracElite=0.2, fracLevy=1.0, alpha=0.5, gamma=1, n=1, scalingFactor=10.0, penalty=0.0, maxGens=20000, maxFevals=200000, convTol=1e-06, stallLimit=10000, optConvTol=0.01, **kwargs):\n ProblemParameters_multi.__init__(self, **kwargs)\n self.population = population\n self.initSampling = initSampling\n self.fracMutation = fracMutation\n assert self.fracMutation >= 0 and self.fracMutation <= 1, 'The probability of discovery must exist on (0,1]'\n self.fracElite = fracElite\n assert self.fracElite >= 0 and self.fracElite <= 1, 'The elitism fraction must exist on (0,1]'\n 
self.fracLevy = fracLevy\n assert self.fracLevy >= 0 and self.fracLevy <= 1, 'The probability that a Levy flight is performed must exist on (0,1]'\n self.alpha = alpha\n self.gamma = gamma\n self.n = n\n self.scalingFactor = scalingFactor\n self.penalty = penalty\n self.maxGens = maxGens\n self.maxFevals = maxFevals\n self.convTol = convTol\n self.stallLimit = stallLimit\n self.optConvTol = optConvTol", "def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs", "def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs", "def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for 
easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )", "def __init_finalaf(self, i,h1,classes):\n self.params['W'+i]=np.random.randn(h1,classes)*self.weight_scale\n self.params['b'+i]=np.zeros(classes)", "def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms", "def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 
'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))", "def Optimizer(r_grasp,PAM_r, PAM_s, object_s, object_f, object_params, phi, r_max, walls, obstacles, obstacles_PAM, current_leg, n, n_p, v_max, force_max, legs, dt):\n global action_push_pull, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned\n # assigning cost of changing from one leg to another based on the distance to the desired pose\n cost_ChangeLeg = 1\n dz_final = np.sqrt((object_s.x - object_f.x) ** 2 + (object_s.y - object_f.y) ** 2)\n if dz_final < 1:\n cost_ChangeLeg = 10\n elif dz_final < 2:\n cost_ChangeLeg = 20\n else:\n cost_ChangeLeg = 10\n\n # assigning weight for cost of predicted repositioning and cost of robot motion\n w_cost_reposition = 40\n w_cost_motion = 10\n\n # finding object's leg cordinates\n object_leg = find_corners(object_s.x, object_s.y, object_s.phi, object_params[7], object_params[8])\n\n # initialization (initializeing cost to infinity)\n cost = [float('inf'), float('inf'), float('inf'), float('inf')]\n cost_legchange = [0, 0, 0, 0]\n cost_PAM = [[0, 0],[0, 0],[0, 0],[0, 0]]\n cost_manipulation = [0, 0, 0, 0]\n cost_motion = [0, 0, 0, 0]\n force = [0, 0, 0, 0]\n path = [[[], []], [[], []], [[], []], [[], []]]\n planned_path_w = [[],[],[],[]]\n PAM_g = [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]\n command = [[], [], [], []]\n des = [[], [], [], [], []]\n PAM_goal = state()\n\n # find the nominal trajectory for manipulation\n theta = nominal_traj([object_s.x,object_s.y,object_s.phi], [object_f.x,object_f.y,object_f.phi], v_max, walls, obstacles, n, dt)\n\n # itterate through each leg to find the leg with minimum cost\n for leg in range(4):\n phi_linear = theta\n psi_linear = [theta[k] + phi[leg] for k in range(len(theta))]\n \t# find the cost and required force for manipulation for the leg\n force[leg], cost_manipulation[leg], planned_path_w[leg], command[leg], des= OptTraj([object_s.x, object_s.y, object_s.phi, object_s.xdot, object_s.ydot, object_s.phidot], [object_f.x, object_f.y, object_f.phi, object_f.xdot, object_f.ydot, object_f.phidot], v_max, walls, obstacles, object_params[0:4], 
object_params[4:7], phi_linear, psi_linear, force_max, r_max[leg], n, dt, object_leg[leg])\n \t# adding cost of changing leg\n if leg != current_leg:\n cost_legchange[leg] = cost_ChangeLeg\n # adding cost of PAM motion to PAM goal pose\n phi0 = np.arctan2(object_leg[leg][1]-object_s.y,object_leg[leg][0]-object_s.x)\n # finding the better option between pulling and pushing for each leg, with the same manipulation plan\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost_PAM[leg][push_pull], path[leg][push_pull], command_pam, goal_orientation = OptPath([PAM_s.x, PAM_s.y, PAM_s.phi], PAM_g[leg][push_pull], walls, obstacles_PAM, n_p, dt)\n if cost_PAM[leg][push_pull]!= float(\"inf\"):\n PAM_s_sim = copy.deepcopy(PAM_s)\n PAM_s_sim.x, PAM_s_sim.y, PAM_s_sim.phi = [PAM_r * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], PAM_r * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n # adding cost of predicted re-positionings\n n_transition = traj_simulation(copy.deepcopy(PAM_s_sim), copy.deepcopy(object_s), force[leg], legs, leg, command[leg])\n # print(n_transition)\n cost_PAM[leg][push_pull] += w_cost_reposition*n_transition\n cost_motion[leg] += min(cost_PAM[leg])*w_cost_motion\n action_push_pull[leg] = np.argmin(cost_PAM[leg])\n else:\n phi0 = np.arctan2(force[leg][0][1], force[leg][0][0])\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost = [cost_legchange[leg] + cost_motion[leg] + cost_manipulation[leg] for leg in range(4)]\n\n if min(cost) < float(\"inf\"):\n \t[min_index, min_value] = [np.argmin(cost), min(cost)]\n \t# Finding the grasping goal pose based on the selected plan\n \tphi0 = np.arctan2(object_leg[min_index][1]-object_s.y,object_leg[min_index][0]-object_s.x)\n \tgrasping_goal = [PAM_r * np.cos(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][0], PAM_r * np.sin(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][1], np.pi * action_push_pull[min_index] + phi0]\n \tPAM_goal = state()\n \tPAM_goal.x, PAM_goal.y, PAM_goal.phi = PAM_g[min_index][action_push_pull[min_index]]\n \tobject_path_planned = Path()\n \tobject_path_planned.header.frame_id = 'frame_0'\n \tfor i in range(len(planned_path_w[min_index])):\n \t\tpose = PoseStamped()\n \t\tpose.pose.position.x = planned_path_w[min_index][i][0]\n \t\tpose.pose.position.y = planned_path_w[min_index][i][1]\n \t\tpose.pose.position.z = 0\n \t\tobject_path_planned.poses.append(pose)\n\n \tPAM_path_planned = Path()\n \tPAM_path_planned.header.frame_id = 'frame_0'\n \tif min_index != current_leg:\n \t\tfor i in range(len(path[min_index][action_push_pull[min_index]])):\n \t\t\tpose = PoseStamped()\n \t\t\tpose.pose.position.x, pose.pose.position.y, pose.pose.orientation.z =path[min_index][action_push_pull[min_index]][i]\n \t\t\tPAM_path_planned.poses.append(pose)\n else:\n \tmin_index = 5\n \tmin_value = float(\"inf\")\n if 0 < min_index and min_index <= 4:\n force_d = force[min_index][0]\n else:\n force_d = [0,0,0]\n\n return cost ,min_index, force_d, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned", "def algorithm_parameters(alg):\n if alg in 
list(SEM_TYPE.keys()):\n return simulate_parameters(alg)\n\n param_dict = dict()\n\n param = getfullargspec(INLINE_ALGORITHMS[alg.upper()].__init__)\n if param is not None:\n param_len = len(param.args)\n if param.defaults:\n if 'input_dim' in param.args:\n param_dict.update({'input_dim': None})\n for index, value in enumerate(reversed(param.defaults)):\n if not isfunction(value) and (value is not None):\n param_dict.update(\n {param.args[param_len - index - 1]: value})\n param = getfullargspec(INLINE_ALGORITHMS[alg.upper()].learn)\n if param is not None:\n param_len = len(param.args)\n if param_len > 2:\n if 'rank' in param.args:\n param_dict.update({'rank': None})\n return param_dict", "def gail():\n algorithm = \"gail\"", "def __init__(self, meta_agent, agent_idx):\n self.state_size = meta_agent.state_size\n self.action_size = meta_agent.action_size\n self.num_agents=meta_agent.num_agents\n self.seed = random.seed(meta_agent.random_seed)\n self.agent_idx=agent_idx\n self.epsilon=EPSILON\n print('meta_agent.state_size, action_size,num_agents,seed_agent_idx',meta_agent.state_size,meta_agent.action_size,meta_agent.num_agents,meta_agent.random_seed,agent_idx)\n\n # Actor Network (w/ Target Network)\n self.actor_local = Actor(self.state_size, self.action_size, meta_agent.random_seed).to(device)\n self.actor_target = Actor(self.state_size, self.action_size, meta_agent.random_seed).to(device)\n self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)\n self.hard_copy(self.actor_target,self.actor_local)\n # Critic Network (w/ Target Network)\n self.critic_local = Critic(self.state_size*self.num_agents, self.action_size*self.num_agents, meta_agent.random_seed).to(device)\n self.critic_target = Critic(self.state_size*self.num_agents, self.action_size*self.num_agents, meta_agent.random_seed).to(device)\n self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) \n self.hard_copy(self.critic_target,self.critic_local)\n print('Agent:',self.agent_idx,'\\n Actor-Critic \\n',self.actor_local,self.critic_local)\n \n # Noise process\n self.noise = OUNoise(self.action_size, meta_agent.random_seed)", "def __init__(self, n_x, n_z, qz_hid, px_hid, filters, seq_length=50, nonlinearity=rectify,\r\n px_nonlinearity=None, x_dist='linear', batchnorm=False, seed=1234):\r\n super(CVAE, self).__init__(n_x, qz_hid + px_hid, n_z, nonlinearity)\r\n self.x_dist = x_dist\r\n self.n_x = n_x\r\n self.seq_length = seq_length\r\n self.n_z = n_z\r\n self.batchnorm = batchnorm\r\n self._srng = RandomStreams(seed)\r\n\r\n # Pool layer cache\r\n pool_layers = []\r\n\r\n # Decide Glorot initializaiton of weights.\r\n init_w = 1e-3\r\n hid_w = \"\"\r\n if nonlinearity == rectify or nonlinearity == softplus:\r\n hid_w = \"relu\"\r\n\r\n # Define symbolic variables for theano functions.\r\n self.sym_x = T.tensor3('x') # inputs\r\n self.sym_z = T.matrix('z')\r\n self.sym_samples = T.iscalar('samples') # MC samples\r\n\r\n # Assist methods for collecting the layers\r\n def dense_layer(layer_in, n, dist_w=init.GlorotNormal, dist_b=init.Normal):\r\n dense = DenseLayer(layer_in, n, dist_w(hid_w), dist_b(init_w), None)\r\n if batchnorm:\r\n dense = bn(dense)\r\n return NonlinearityLayer(dense, self.transf)\r\n\r\n def stochastic_layer(layer_in, n, samples, nonlin=None):\r\n mu = DenseLayer(layer_in, n, init.Normal(init_w), init.Normal(init_w), nonlin)\r\n logvar = DenseLayer(layer_in, n, init.Normal(init_w), init.Normal(init_w), nonlin)\r\n return SampleLayer(mu, 
logvar, eq_samples=samples, iw_samples=1), mu, logvar\r\n\r\n def conv_layer(layer_in, filter, stride=(1, 1), pool=1, name='conv'):\r\n l_conv = Conv2DLayer(layer_in, num_filters=filter, filter_size=(3, 1), stride=stride, pad='full', name=name)\r\n if pool > 1:\r\n l_conv = MaxPool2DLayer(l_conv, pool_size=(pool, 1))\r\n pool_layers.append(l_conv)\r\n return l_conv\r\n\r\n # Reshape input\r\n l_x_in = InputLayer((None, seq_length, n_x), name='Input')\r\n l_x_in_reshp = ReshapeLayer(l_x_in, (-1, 1, seq_length, n_x))\r\n print(\"l_x_in_reshp\", l_x_in_reshp.output_shape)\r\n\r\n # CNN encoder implementation\r\n l_conv_enc = l_x_in_reshp\r\n for filter, stride, pool in filters:\r\n l_conv_enc = conv_layer(l_conv_enc, filter, stride, pool)\r\n print(\"l_conv_enc\", l_conv_enc.output_shape)\r\n\r\n # Pool along last 2 axes\r\n l_global_pool_enc = GlobalPoolLayer(l_conv_enc)\r\n l_enc = dense_layer(l_global_pool_enc, n_z)\r\n print(\"l_enc\", l_enc.output_shape)\r\n\r\n # Recognition q(z|x)\r\n l_qz = l_enc\r\n for hid in qz_hid:\r\n l_qz = dense_layer(l_qz, hid)\r\n l_qz, l_qz_mu, l_qz_logvar = stochastic_layer(l_qz, n_z, self.sym_samples)\r\n print(\"l_qz\", l_qz.output_shape)\r\n\r\n # Inverse pooling\r\n l_global_depool = InverseLayer(l_qz, l_global_pool_enc)\r\n print(\"l_global_depool\", l_global_depool.output_shape)\r\n\r\n # Reverse pool layer order\r\n pool_layers = pool_layers[::-1]\r\n\r\n # Decode\r\n l_deconv = l_global_depool\r\n for idx, filter in enumerate(filters[::-1]):\r\n filter, stride, pool = filter\r\n if pool > 1:\r\n l_deconv = InverseLayer(l_deconv, pool_layers[idx])\r\n l_deconv = Conv2DLayer(l_deconv, num_filters=filter, filter_size=(3, 1), stride=(stride, 1), W=init.GlorotNormal('relu'))\r\n print(\"l_deconv\", l_deconv.output_shape)\r\n\r\n # The last l_conv layer should give us the input shape\r\n l_dec = Conv2DLayer(l_deconv, num_filters=1, filter_size=(3, 1), pad='same', nonlinearity=None)\r\n print(\"l_dec\", l_dec.output_shape)\r\n\r\n # Flatten first two dimensions\r\n l_dec = ReshapeLayer(l_dec, (-1, n_x))\r\n\r\n l_px = l_dec\r\n if x_dist == 'bernoulli':\r\n l_px = DenseLayer(l_px, n_x, init.GlorotNormal(), init.Normal(init_w), sigmoid)\r\n elif x_dist == 'multinomial':\r\n l_px = DenseLayer(l_px, n_x, init.GlorotNormal(), init.Normal(init_w), softmax)\r\n elif x_dist == 'gaussian':\r\n l_px, l_px_mu, l_px_logvar = stochastic_layer(l_px, n_x, self.sym_samples, px_nonlinearity)\r\n elif x_dist == 'linear':\r\n l_px = DenseLayer(l_px, n_x, nonlinearity=None)\r\n\r\n # Reshape all the model layers to have the same size\r\n self.l_x_in = l_x_in\r\n\r\n self.l_qz = ReshapeLayer(l_qz, (-1, self.sym_samples, 1, n_z))\r\n self.l_qz_mu = DimshuffleLayer(l_qz_mu, (0, 'x', 'x', 1))\r\n self.l_qz_logvar = DimshuffleLayer(l_qz_logvar, (0, 'x', 'x', 1))\r\n\r\n self.l_px = DimshuffleLayer(ReshapeLayer(l_px, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4))\r\n self.l_px_mu = DimshuffleLayer(ReshapeLayer(l_px_mu, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4)) \\\r\n if x_dist == \"gaussian\" else None\r\n self.l_px_logvar = DimshuffleLayer(ReshapeLayer(l_px_logvar, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4)) \\\r\n if x_dist == \"gaussian\" else None\r\n\r\n # Predefined functions\r\n inputs = {self.l_x_in: self.sym_x}\r\n outputs = get_output(l_qz, inputs, deterministic=True)\r\n self.f_qz = theano.function([self.sym_x, self.sym_samples], outputs)\r\n\r\n inputs = {l_qz: self.sym_z}\r\n outputs = get_output(self.l_px, inputs, 
deterministic=True).mean(axis=(1, 2))\r\n self.f_px = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n outputs = get_output(self.l_px_mu, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_mu = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n outputs = get_output(self.l_px_logvar, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_var = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n # Define model parameters\r\n self.model_params = get_all_params([self.l_px])\r\n self.trainable_model_params = get_all_params([self.l_px], trainable=True)", "def main():\n parser = argparse.ArgumentParser(description='MergeGVCFs and genotype them using the GATK')\n parser.add_argument('-g', '--gatk', dest='gatk', help=\"Location of the GATK\", required=True)\n parser.add_argument('-x', '--xmx', dest='xmx', help=\"Memory to use with JAVA\", required=True)\n parser.add_argument('-c', '--cores', dest='cores', help=\"Number of cores to use\")\n parser.add_argument('-o', '--output', dest='output', \n help='Final output from the haplotype caller')\n parser.add_argument('-r', '--reference', dest='reference', \n help='Reference FASTA file')\n parser.add_argument('-b','--bed', dest='bed_file',\n help=\"Bed file for limiting the GATK\")\n parser.add_argument('-p', '--ploidy', dest='ploidy', \n help=\"Sample ploidy\", default=2)\n parser.add_argument('-d', '--out_directory', dest='directory', help='Output director')\n parser.add_argument('bams', nargs=\"*\", help='gVCF variant call files output from the GATK')\n args = parser.parse_args()\n args.cores = int(args.cores)\n args.xmx = args.xmx.strip('\"')\n print args.bams\n genovcfs = haplotype_caller(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n bams=args.bams, reference=args.reference,\n out_directory=args.directory, ploidy=args.ploidy, bed_file=args.bed_file)\n outputs = merge_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n gvcfs=genovcfs, reference=args.reference)\n genotype_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n inputs=outputs, output=args.output, reference=args.reference,bed_file=args.bed_file)\n #haplotype_single(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n # inputs=args.gvcfs, reference=args.reference)", "def __init__(self, autosub=False):\n self.G = nx.DiGraph()\n self.autosub = autosub\n \"\"\"\n Graph object of this analyzer.\n It is actually a networkx directed graph object(DiGraph), so you can apply all operations available to DiGraph object using networkx.\n \"\"\"\n self.entityList = [dict() for x in range(len(NEList))]\n \"\"\"\n List of entities appeared during this analysis round.\n \"\"\"\n self.proList = list()\n \"\"\"\n List of pronouns appeared during this analysis round.\n \"\"\"\n self.pos = 0\n \"\"\"\n Current position of the analyzer.\n \"\"\"\n self.proc = Subprocess('cabocha -f1')\n \"\"\"\n Communicator to backend for KnowledgeAnalyzer.\n \"\"\"", "def __init__(self, method=\"RandomForest\", n_random_feature_ratio=5, problem_type=\"infer\", rows_to_scan=\"all\"):\n self.feature_importances_ = None\n self.method = method\n self.problem_type = problem_type \n self.rows_to_scan = rows_to_scan \n self.n_random_feature_ratio = n_random_feature_ratio", "def main():\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn AgglomerativeClustering method. 
\", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n agglomerative_clustering(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path,\n output_plot_path=args.output_plot_path,\n properties=properties)", "def __init__(self, n_samples, max_iter=1000, verbose=1, eps=1e-3):\n\n\t\tself.sparam = dict()\n\t\twith open('config.json') as config_file:\n\t\t\tself.sparam = json.load(config_file)\n\n\t\tself.C = self.sparam['C']\n\t\tself.sizePsi = self.sparam['sizePsi']\n\t\tself.num_classes = self.sparam['num_classes']\n\t\t#self.w = np.random.rand(sparam['sizePsi'],1)\n\t\tself.w = np.zeros((self.sparam['sizePsi'],1))\n\t\tself.tempw = np.zeros((self.sparam['sizePsi'],1))\n\t\t#self.tempw = np.random.rand(self.sparam['sizePsi'], 1)\n\t\t#self.tempw = np.random.rand(self.sparam['sizePsi'])\n\t\t#self.tempw[0:self.sizePsi/2] = np.zeros(self.sizePsi/2)\n\t\t#self.tempw = self.tempw.reshape(self.sizePsi, 1)\n\t\t#print np.sum(self.tempw)\n\t\tself.w_changed = False\n\t\tself.n = n_samples\n\t\tself.max_iter = max_iter\n\t\tself.verbose = verbose\n\t\tself.eps = eps\n\t\tself.alphas = []\n\t\tself.losses = []", "def _define_biophysics(self):\n\t\tfor node in self.node:\n\t\t\tnode.nseg=1\n\t\t\tnode.diam=self._nodeD\n\t\t\tnode.L=self._nodeLength\n\t\t\tnode.Ra=self._rhoa/10000\n\t\t\tnode.cm=2\n\t\t\tnode.insert('axnode')\n\t\t\tnode.insert('extracellular')\n\t\t\tnode.xraxial[0]=self._Rpn0\n\t\t\tnode.xg[0]=1e10\n\t\t\tnode.xc[0]=0\n\n\t\tfor mysa in self.mysa:\n\t\t\tmysa.nseg=1\n\t\t\tmysa.diam=self._fiberD\n\t\t\tmysa.L=self._paraLength1\n\t\t\tmysa.Ra=self._rhoa*(1/(self._paraD1/self._fiberD)**2)/10000\n\t\t\tmysa.cm=2*self._paraD1/self._fiberD\n\t\t\tmysa.insert('pas')\n\t\t\tmysa.g_pas=0.001*self._paraD1/self._fiberD\t\t\n\t\t\tmysa.e_pas=-80\n\t\t\tmysa.insert('extracellular')\n\t\t\tmysa.xraxial[0]=self._Rpn1\n\t\t\tmysa.xg[0]=self._mygm/(self._nl*2)\n\t\t\tmysa.xc[0]=self._mycm/(self._nl*2)\n\n\t\tfor flut in self.flut:\n\t\t\tflut.nseg=1\n\t\t\tflut.diam=self._fiberD\n\t\t\tflut.L=self._paraLength2\n\t\t\tflut.Ra=self._rhoa*(1/(self._paraD2/self._fiberD)**2)/10000\n\t\t\tflut.cm=2*self._paraD2/self._fiberD\n\t\t\tflut.insert('pas')\n\t\t\tflut.g_pas=0.0001*self._paraD2/self._fiberD\t\t\n\t\t\tflut.e_pas=-80\n\t\t\tflut.insert('extracellular')\n\t\t\tflut.xraxial[0]=self._Rpn2\n\t\t\tflut.xg[0]=self._mygm/(self._nl*2)\n\t\t\tflut.xc[0]=self._mycm/(self._nl*2)\n\t\t\n\t\tfor stin in 
self.stin:\n\t\t\tstin.nseg=1\n\t\t\tstin.diam=self._fiberD\n\t\t\tstin.L=self._interLength\n\t\t\tstin.Ra=self._rhoa*(1/(self._axonD/self._fiberD)**2)/10000\n\t\t\tstin.cm=2*self._axonD/self._fiberD\n\t\t\tstin.insert('pas')\n\t\t\tstin.g_pas=0.0001*self._axonD/self._fiberD\n\t\t\tstin.e_pas=-80\n\t\t\tstin.insert('extracellular')\n\t\t\tstin.xraxial[0]=self._Rpx\n\t\t\tstin.xg[0]=self._mygm/(self._nl*2)\n\t\t\tstin.xc[0]=self._mycm/(self._nl*2)", "def __init__(self):\n super(enc_clf, self).__init__()\n\n self.fc1 = nn.Linear(784, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 512)\n self.fc4 = nn.Linear(512, 10)", "def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def __init__(self, x, y, input_len, sigma=1.0, learning_rate=0.5,\n decay_function=asymptotic_decay,\n neighborhood_function='gaussian', topology='rectangular',\n activation_distance='euclidean', random_seed=None):\n if sigma >= x or sigma >= y:\n warn('Warning: sigma is too high for the dimension of the map.')\n\n self._random_generator = random.RandomState(random_seed)\n\n self._learning_rate = learning_rate\n self._sigma = sigma\n self._input_len = input_len\n # random initialization\n self._weights = self._random_generator.rand(x, y, input_len)*2-1\n self._weights /= linalg.norm(self._weights, axis=-1, keepdims=True)\n\n self._activation_map = zeros((x, y))\n self._neigx = arange(x)\n self._neigy = arange(y) # used to evaluate the neighborhood function\n\n if topology not in ['hexagonal', 'rectangular']:\n msg = '%s not supported only hexagonal and rectangular available'\n raise ValueError(msg % topology)\n self.topology = topology\n self._xx, self._yy = meshgrid(self._neigx, self._neigy)\n self._xx = self._xx.astype(float)\n self._yy = self._yy.astype(float)\n if topology == 'hexagonal':\n self._xx[::-2] -= 0.5\n if 
neighborhood_function in ['triangle']:\n warn('triangle neighborhood function does not ' +\n 'take in account hexagonal topology')\n\n self._decay_function = decay_function\n\n neig_functions = {'gaussian': self._gaussian,\n 'mexican_hat': self._mexican_hat,\n 'bubble': self._bubble,\n 'triangle': self._triangle}\n\n if neighborhood_function not in neig_functions:\n msg = '%s not supported. Functions available: %s'\n raise ValueError(msg % (neighborhood_function,\n ', '.join(neig_functions.keys())))\n\n if neighborhood_function in ['triangle',\n 'bubble'] and (divmod(sigma, 1)[1] != 0\n or sigma < 1):\n warn('sigma should be an integer >=1 when triangle or bubble' +\n 'are used as neighborhood function')\n\n self.neighborhood = neig_functions[neighborhood_function]\n\n distance_functions = {'euclidean': self._euclidean_distance,\n 'cosine': self._cosine_distance,\n 'manhattan': self._manhattan_distance,\n 'chebyshev': self._chebyshev_distance}\n\n if isinstance(activation_distance, str):\n if activation_distance not in distance_functions:\n msg = '%s not supported. Distances available: %s'\n raise ValueError(msg % (activation_distance,\n ', '.join(distance_functions.keys())))\n\n self._activation_distance = distance_functions[activation_distance]\n elif callable(activation_distance):\n self._activation_distance = activation_distance", "def generate_models(R, u_t, inverse_transform, algo):\n model_list = []\n it_max = 10000 # maximum number of iterations after which the Lasso and SR3 are stopped to save computational time\n # in our experience, if the model converges at all, this is usually far sooner than 10000 iterations\n tol_iterativ = 10 * np.finfo(float).eps # convergence tolerance of SR3 and Lasso\n if algo == 'FoBa':\n log_epsilon_range = np.arange(-15., 15., 0.5)\n for log_epsilon in log_epsilon_range:\n w = FoBa(R, u_t, epsilon=10 ** log_epsilon, backwards_freq=1, maxit_f=20)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'Lasso':\n log_lambda_range = np.arange(-15., 15., 0.5) # l1 factor\n for log_lambda in log_lambda_range:\n # initialize Lasso model\n clf = linear_model.Lasso(alpha=10**log_lambda, copy_X=True, fit_intercept=True, max_iter=it_max,\n normalize=False, positive=False, precompute=False, random_state=None,\n selection='cyclic', tol=tol_iterativ, warm_start=False)\n clf.fit(R, u_t) # fit model\n w = clf.coef_\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'STRidge':\n log_lambda_range = np.arange(-15, 15., 1.) # l2 factor (Ridge)\n log_tol_range = np.arange(-16, 10., 1.)\n for log_lambda in log_lambda_range:\n for log_tol in log_tol_range:\n w = STRidge(R, u_t, maxit=1000, lam=10**log_lambda, tol=10**log_tol, normalize=2)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'SR3':\n # Uses python-matlab interface to directly use the original SR3 implementation.\n # Note that setting up the interface can be a bit tricky; if setting up the interface is too much effort,\n # just leave SR3 out of the 'algo_list' in the SITE file.\n t_sr3_start = time.time()\n eng = matlab.engine.start_matlab()\n eng.setup_matlab(nargout=0)\n log_lambda_range = np.arange(-15, 15., 1.) 
# l1 factor\n log_kappa_range = np.arange(-5, 6., 1.)\n for log_kappa in log_kappa_range:\n for log_lambda in log_lambda_range:\n R_matlab = matlab.double(R.tolist())\n u_t_matlab = matlab.double(u_t.tolist())\n # iters can be used to check if model converged or it_max was reached\n x, w, iters = eng.sr3(R_matlab, u_t_matlab, 'mode', '0', 'kap', (10**log_kappa).item(), 'lam',\n (10**log_lambda).item(), 'itm', it_max, 'tol', tol_iterativ.item(), 'ptf',\n 45000, nargout=3)\n w = np.asarray(w)\n initialize_model(w, model_list, algo, inverse_transform)\n eng.quit()\n print('Time for evaluation SR3: ', time.time() - t_sr3_start)\n\n else: raise ('The algorithm ' + str(algo) + ' is not implemented! (or a typo)')\n return model_list", "def gen_ap_knee_def(knee=None):\n\n while True:\n\n ap_params = [None, None, None]\n\n ap_params[0] = np.random.choice(OFF_OPTS, p=OFF_PROBS)\n ap_params[1] = knee if knee is not None else np.random.choice(KNE_OPTS, p=KNE_PROBS)\n ap_params[2] = np.random.choice(EXP_OPTS, p=EXP_PROBS)\n\n yield ap_params", "def _main():\n min_args = 6\n max_args = min_args + 3\n if len(sys.argv) not in range(min_args, max_args + 1):\n print(_HELP_STR)\n sys.exit(1)\n\n n_o = int(sys.argv[1])\n d = int(sys.argv[2])\n r_q = int(sys.argv[3])\n q = int(sys.argv[4])\n eps = float(sys.argv[5])\n kern = sys.argv[6] if len(sys.argv) > 6 else 'rbf'\n seed = int(sys.argv[7]) if len(sys.argv) > 7 else 1234\n testtype = sys.argv[8] if len(sys.argv) > 8 else 'inversion'\n kerntypes = ['rbf', 'periodic', 'matern', 'mix']\n testtypes = ['inv', 'opt']\n\n assert n_o > 7\n assert d > 0\n assert r_q > 0\n assert r_q <= d\n assert q > 0\n assert eps > 0\n assert kern in kerntypes\n assert testtype in testtypes\n np.random.seed(seed)\n n = n_o * d\n\n print('n_o {} d {} r_q {} q {} eps {} kern {} seed {} test-type {}'.format(\n n_o, d, r_q, q, eps, kern, seed, testtype))\n\n distrib = scipy.stats.truncnorm(-1, 1)\n coreg_vecs = distrib.rvs(size=(q, r_q, d))\n coreg_diags = np.reciprocal(np.random.gamma(shape=1, scale=1, size=(q, d)))\n noise = np.reciprocal(np.random.gamma(\n shape=(1 + (1 / eps)), scale=1, size=d))\n kernels = gen_kernels(q)\n descriptions = [\n 'rbf only - inv lengthscales in logspace(0, 1, q)',\n 'periodic only - inv lengthscale is 1, periods in logspace(0, 1, q)',\n 'matern32 only - inv lengthscales in logspace(0, 1, q)',\n 'mixed - rbf, periodic, matern varying params added together']\n kdict = {k_name: (k, desc) for k_name, k, desc in\n zip(kerntypes, kernels, descriptions)}\n\n Xs, Ys = np.random.rand(2, d, n_o)\n Xs = np.expand_dims(Xs, Xs.ndim)\n\n dists, grid_dists, interpolant, interpolant_T = prep(\n d, n_o, Xs)\n\n k, desc = kdict[kern]\n print()\n print(desc)\n\n fkern = FunctionalKernel(D=d, lmc_kernels=k,\n lmc_ranks=[len(x) for x in coreg_vecs])\n fkern.noise = noise\n fkern.coreg_vecs = coreg_vecs\n fkern.coreg_diags = coreg_diags\n fkern.set_input_dim(1)\n\n run_kernel_benchmark(\n Xs, Ys, fkern, dists, grid_dists, interpolant, interpolant_T, testtype)", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not 
well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def agentbasedsim(L, a, pi, aenv, pienv, xi,\n adev=1.0, pidev=0.5,\n nind=10, ngeneration=100, nburnin=10,\n prng=None,\n callback=None):\n\n p, q = from_api(a, pi)\n alpha, beta = from_api(aenv, pienv)\n if not adev == 1.0:\n delta, epsilon = from_api(adev, pidev)\n\n # all parameters need to be in array form if cython acceleration is used\n if usecstepmarkov:\n alpha = _arrayify(alpha, L)\n beta = _arrayify(beta, L)\n p = _arrayify(p, (nind, L))\n q = _arrayify(q, (nind, L))\n if not adev == 1.0:\n delta = _arrayify(delta, (nind, L))\n epsilon = _arrayify(epsilon, (nind, L))\n \n env = np.zeros(L, dtype = bool)\n gen = np.zeros((nind, L), dtype = bool)\n \n totoffsprings = np.zeros(ngeneration)\n prng = prng if prng else np.random\n \n for generation in range(ngeneration):\n # time step environment\n rand = prng.rand(L)\n env = stepmarkov(env, alpha, beta, rand)\n if callback and generation >= nburnin:\n callback(gen, env)\n if not adev == 1.0:\n rand = prng.rand(nind, L)\n phen = stepmarkov2d(gen, delta, epsilon, rand)\n else:\n phen = gen\n # calculate growth rate\n noffspring = xi(phen, env)\n totoffspring = noffspring.sum()\n totoffsprings[generation] = totoffspring\n # time step population\n rand = prng.rand(nind, L)\n parent = gen[np.arange(nind).repeat(prng.multinomial(nind, noffspring/totoffspring))]\n gen = stepmarkov2d(parent, p, q, rand)\n \n # calculate Lambda = mean growth rate\n return np.mean(np.log(totoffsprings[nburnin:]/nind))", "def __init__(self, \n n_neurons = \"micro\", # else: \"brunel\" or arrays\n C_ab = \"micro\", # else: \"brunel\" or arrays\n area = net.area, # simulation size\n neuron_model = net.neuron_model, # \"iaf_psc_delta\" or \"iaf_psc_exp\"\n connection_rule = net.connection_rule, # \"fixed_total_number\" or \"fixed_indegree\"\n j02 = net.j02, \n weight_rel_sd = net.weight_rel_sd, \n delay_rel_sd = net.delay_rel_sd, \n g = net.g, \n rate_ext = net.rate_ext):\n ###################################################\n ### \tNetwork parameters\t\t### \n ###################################################\n\n # area of network in mm^2; scales numbers of neurons\n # use 1 for the full-size network (77,169 neurons)\n self.area = area\n \n self.layers = net.layers #np.array([\"L23\", \"L4\", \"L5\", \"L6\"])\n self.types = net.types #np.array([\"e\", \"i\"]) \n self.populations = np.array([layer + typus for layer in self.layers for typus in self.types])\n self.n_populations = len(self.populations)\n 
self.n_layers = len(self.layers)\n self.n_types = len(self.types)\n \n # Neuron numbers\n if n_neurons == \"micro\":\n self.n_neurons = np.int_(net.full_scale_n_neurons * self.area)\n elif n_neurons == \"brunel\":\n # Provide an array of equal number of neurons in each exc./inh. population\n gamma = 0.25\n inh_factor = 1. / (gamma + 1.)\n exc_factor = 1. - inh_factor \n n_total_micro = np.sum(net.full_scale_n_neurons * self.area)\n N_exc = n_total_micro/self.n_populations * exc_factor\n N_inh = n_total_micro/self.n_populations * inh_factor\n self.n_neurons = np.tile([N_exc, N_inh], self.n_layers).astype(int)\n else:\n if type(n_neurons) == np.ndarray:\n if n_neurons.shape == (self.n_populations, ):\n self.n_neurons = np.int_(n_neurons)\n else:\n raise Exception(\"'n_neurons' has wrong shape. \"+\n \"Expects (%i,)\"%self.n_populations)\n else: \n raise Exception(\"'n_neurons' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n self.n_total = np.sum(self.n_neurons)\n\n \n # Synapse numbers\n # Connection probabilities: conn_probs[post, pre] = conn_probs[target, source]\n conn_probs = net.conn_probs\n # Scale synapse numbers of the C_ab\n if net.scale_C_linearly:\n n_outer_full = np.outer(net.full_scale_n_neurons, net.full_scale_n_neurons)\n C_full_scale = np.log(1. - conn_probs) / np.log(1. - 1. / n_outer_full)\n C_scaled = np.int_(C_full_scale * self.area)\n else:\n n_outer = np.outer(self.n_neurons, self.n_neurons)\n C_scaled = np.int_(np.log(1. - conn_probs) / np.log(1. - 1. / n_outer))\n\n self.connection_rule = connection_rule\n if self.connection_rule == \"fixed_total_number\":\n C_ab_micro = C_scaled # total number, do not divide! \n elif self.connection_rule == \"fixed_indegree\":\n C_ab_micro = (C_scaled.T / (net.full_scale_n_neurons * self.area)).T\n else:\n raise Exception(\"Unexpected connection type. Use 'fixed_total_number' for microcircuit \" + \n \"model or 'fixed_indegree' for Brunel's model!\")\n\n if C_ab == \"micro\":\n self.C_ab = C_ab_micro # shall not be integer at this point!\n elif C_ab == \"brunel\":\n C_e = np.mean(C_ab_micro) # mean for microcircuit (= 501 in full scale)\n C_i = gamma * C_e\n self.C_ab = np.tile([C_e, C_i], (self.n_populations, self.n_layers)).astype(int) \n else:\n if type(C_ab) == np.ndarray:\n if C_ab.shape == (self.n_populations, self.n_populations):\n self.C_ab = np.int_(C_ab)\n else:\n raise Exception(\"'C_ab' has wrong shape. 
\"+\n \"Expects (%i, %i)\"%(self.n_populations, self.n_populations))\n else: \n raise Exception(\"'C_ab' expects either numpy.ndarray or string \"+\n \"in {'micro', 'brunel'}\")\n\n\n ###################################################\n ### Single-neuron parameters\t\t### \n ###################################################\n self.neuron_model = neuron_model\n self.Vm0_mean = net.Vm0_mean # mean of initial membrane potential (mV)\n self.Vm0_std = net.Vm0_std # std of initial membrane potential (mV)\n self.model_params = net.model_params\n if not self.neuron_model==\"iaf_psc_delta\":\n self.model_params[\"tau_syn_ex\"] = net.tau_syn_ex # excitatory synaptic time constant (ms)\n self.model_params[\"tau_syn_in\"] = net.tau_syn_in # inhibitory synaptic time constant (ms)\n self.tau_syn_ex = net.tau_syn_ex # ms\n self.tau_syn_in = net.tau_syn_in # ms\n self.tau_syn = np.tile([self.tau_syn_ex, self.tau_syn_in], (self.n_populations, self.n_layers))\n # Rescaling for model calculations: these values are not used in the simulation!\n self.tau_m = self.model_params[\"tau_m\"] # ms\n self.t_ref = self.model_params[\"t_ref\"] # ms\n self.E_L = self.model_params[\"E_L\"] # mV\n self.V_r = self.model_params[\"V_reset\"] - self.E_L # mV\n self.theta = self.model_params[\"V_th\"] - self.E_L # mV\n self.C_m = self.model_params[\"C_m\"] # pF\n\n\n ######################################################\n # Synaptic weights. Depend on neuron_model! ##\n ######################################################\n self.g = g\n self.j02 = j02\n\n g_all = np.tile([1., -self.g], (self.n_populations, self.n_layers))\n L23e_index = np.where(self.populations == \"L23e\")[0][0]\n L4e_index = np.where(self.populations == \"L4e\")[0][0]\n g_all[L23e_index, L4e_index] *= self.j02\n \n self.J = net.PSP_e # mv; mean PSP, used as reference PSP\n self.J_ab = self.J * g_all\n self.weight_rel_sd = weight_rel_sd # Standard deviation of weight relative to mean weight\n # Transformation from peak PSP to PSC\n delta_tau = self.tau_syn - self.tau_m\n ratio_tau = self.tau_m / self.tau_syn\n PSC_over_PSP = self.C_m * delta_tau / (self.tau_m * self.tau_syn * \\\n (ratio_tau**(self.tau_m / delta_tau) - ratio_tau**(self.tau_syn / delta_tau))) \n # Actual weights have to be adapted: from peak PSP to PSC (and back...)\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * PSC_over_PSP # neuron populations\n elif self.neuron_model==\"iaf_psc_delta\":\n self.weights = self.J_ab * PSC_over_PSP * (self.tau_syn_ex) / self.C_m\n # This might be an overkill / doing things twice...\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weights = self.J_ab * np.exp(1) / (self.tau_syn_ex) / self.C_m\n else:\n raise Exception(\"Neuron model should be iaf_psc_ - {delta, exp, alpha}!\")\n\n\n ###################################################\n ### Delays and dicts ### \n ###################################################\n # mean dendritic delays for excitatory and inhibitory transmission (ms)\n self.delay_e = net.delay_e # ms, excitatory synapses\n self.delay_i = net.delay_i # ms, inhibitory synapses\n\n self.delays = np.tile([self.delay_e, self.delay_i], (self.n_populations, self.n_layers)) # adapt...\n self.delay_rel_sd = delay_rel_sd \n \n # Synapse dictionaries\n # default connection dictionary\n self.conn_dict = {\"rule\": connection_rule}\n # weight distribution of connections between populations\n self.weight_dict_exc = net.weight_dict_exc\n 
self.weight_dict_inh = net.weight_dict_inh\n # delay distribution of connections between populations\n self.delay_dict = net.delay_dict\n # default synapse dictionary\n self.syn_dict = net.syn_dict\n \n \n ###################################################\n ### External stimuli ## \n ###################################################\n # rate of background Poisson input at each external input synapse (spikes/s) \n self.rate_ext = rate_ext # Hz \n self.J_ext = net.PSP_ext # external synaptic weight\n self.delay_ext = self.delay_e # ms; mean delay of external input\n self.dc_amplitude = net.dc_amplitude # constant bg amplitude\n self.C_aext = net.C_aext # in-degrees for background input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_ext = self.J_ext * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_ext = self.J_ext * np.exp(1) / self.tau_syn_ex / self.C_m\n\n # optional additional thalamic input (Poisson)\n self.n_th = net.n_th # size of thalamic population\n self.th_start = net.th_start # onset of thalamic input (ms)\n self.th_duration = net.th_duration # duration of thalamic input (ms)\n self.th_rate = net.th_rate # rate of thalamic neurons (spikes/s)\n self.J_th = net.PSP_th # mean EPSP amplitude (mV) for thalamic input\n # Adapt weights\n if self.neuron_model==\"iaf_psc_exp\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] \n elif self.neuron_model==\"iaf_psc_delta\":\n self.weight_th = self.J_th * PSC_over_PSP[0, 0] * self.tau_syn_ex / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\": # PSCs calculated from PSP amplitudes\n self.weight_th = self.J_th * np.exp(1) / self.tau_syn_ex / self.C_m\n\n \n # connection probabilities for thalamic input\n conn_probs_th = net.conn_probs_th\n if net.scale_C_linearly:\n if not self.n_th == 0:\n C_th_full_scale = np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * net.full_scale_n_neurons))\n self.C_th_scaled = np.int_(C_th_full_scale * self.area)\n else:\n if not self.n_th == 0:\n self.C_th_scaled = np.int_(np.log(1. - conn_probs_th) / \\\n np.log(1. - 1. / (self.n_th * self.n_neurons_micro)))\n if self.n_th == 0:\n self.C_th_scaled = None\n \n # mean delay of thalamic input (ms)\n self.delay_th = net.delay_th\n # standard deviation relative to mean delay of thalamic input\n self.delay_th_rel_sd = net.delay_th_rel_sd\n\n\n ######################################################\n # Predefine matrices for mean field ##\n ######################################################\n if self.neuron_model==\"iaf_psc_delta\":\n self.J_mu = self.weights\n self.J_sd = self.weights\n self.J_mu_ext = self.weight_ext \n self.J_sd_ext = self.weight_ext\n elif self.neuron_model==\"iaf_psc_exp\":\n self.J_mu = self.weights * self.tau_syn / self.C_m\n self.J_sd = self.weights * np.sqrt(self.tau_syn / 2.) / self.C_m\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex / self.C_m\n self.J_sd_ext = self.weight_ext * np.sqrt(self.tau_syn_ex / 2.) / self.C_m\n elif self.neuron_model==\"iaf_psc_alpha\":\n self.J_mu = self.weights * self.tau_syn**2 / self.C_m\n self.J_sd = self.weights * self.tau_syn**(3./2.) / (self.C_m * 2.)\n self.J_mu_ext = self.weight_ext * self.tau_syn_ex**2 / self.C_m\n self.J_sd_ext = self.weight_ext * self.tau_syn_ex**(3./2.) 
/ (self.C_m * 2.)\n self.mat_mu = self.tau_m * 1e-3 * self.J_mu * self.C_ab\n self.mu_ext = self.tau_m * 1e-3 * self.J_mu_ext * self.C_aext * self.rate_ext\n self.mat_var = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd**2 * self.C_ab\n self.var_ext = self.tau_m * 1e-3 * (1 + self.weight_rel_sd ** 2) * self.J_sd_ext**2 * self.C_aext * self.rate_ext", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def __init__(self, dictAlg):\n\n # values of dict dictAlg are DataSetList which should have only one\n # element which will be assigned as values in the following lines.\n d = set()\n f = set()\n for i in dictAlg.values():\n d |= set(j.dim for j in i)\n f |= set(j.funcId for j in i)\n\n if len(f) > 1 or len(d) > 1:\n Usage('Expect the data of algorithms for only one function and '\n 'one dimension.')\n\n f = f.pop()\n d = d.pop()\n\n dictMaxEvals = {}\n dictFinalFunVals = {}\n tmpdictAlg = {}\n for alg, i in dictAlg.iteritems():\n if len(i) == 0:\n warnings.warn('Algorithm %s was not tested on f%d %d-D.'\n % (alg, f, d))\n continue\n elif len(i) > 1:\n warnings.warn('Algorithm %s has a problem on f%d %d-D.'\n % (alg, f, d))\n continue\n\n tmpdictAlg[alg] = i[0] # Assign ONLY the first element as value\n dictMaxEvals[alg] = i[0].maxevals\n dictFinalFunVals[alg] = i[0].finalfunvals\n\n dictAlg = tmpdictAlg\n\n sortedAlgs = dictAlg.keys()\n # algorithms will be sorted along sortedAlgs which is now a fixed list\n\n # Align ERT\n erts = list(np.transpose(np.vstack([dictAlg[i].target, dictAlg[i].ert]))\n for i in sortedAlgs)\n res = readalign.alignArrayData(readalign.HArrayMultiReader(erts))\n\n resalgs = []\n reserts = []\n # For each function value\n for i in res:\n # Find best algorithm\n curerts = i[1:]\n assert len((np.isnan(curerts) == False)) > 0\n currentbestert = np.inf\n currentbestalg = ''\n for j, tmpert in enumerate(curerts):\n if np.isnan(tmpert):\n continue # TODO: don't 
disregard these entries\n if tmpert == currentbestert:\n # TODO: what do we do in case of ties?\n # look at function values corresponding to the ERT?\n # Look at the function evaluations? the success ratio?\n pass\n elif tmpert < currentbestert:\n currentbestert = tmpert\n currentbestalg = sortedAlgs[j]\n reserts.append(currentbestert)\n resalgs.append(currentbestalg)\n\n dictiter = {}\n dictcurLine = {}\n resDataSet = []\n\n # write down the #fevals to reach the function value.\n for funval, alg in zip(res[:, 0], resalgs):\n it = dictiter.setdefault(alg, iter(dictAlg[alg].evals))\n curLine = dictcurLine.setdefault(alg, np.array([np.inf, 0]))\n while curLine[0] > funval:\n try:\n curLine = it.next()\n except StopIteration:\n break\n dictcurLine[alg] = curLine.copy()\n tmp = curLine.copy()\n tmp[0] = funval\n resDataSet.append(tmp)\n\n setalgs = set(resalgs)\n dictFunValsNoFail = {}\n for alg in setalgs:\n for curline in dictAlg[alg].funvals:\n if (curline[1:] == dictAlg[alg].finalfunvals).any():\n # only works because the funvals are monotonous\n break\n dictFunValsNoFail[alg] = curline.copy()\n\n self.evals = resDataSet\n # evals is not a np array but a list of arrays because they may not\n # all be of the same size.\n self.maxevals = dict((i, dictMaxEvals[i]) for i in setalgs)\n self.finalfunvals = dict((i, dictFinalFunVals[i]) for i in setalgs)\n self.funvalsnofail = dictFunValsNoFail\n self.dim = d\n self.funcId = f\n self.algs = resalgs\n self.algId = 'Virtual Best Algorithm'\n self.comment = 'Combination of ' + ', '.join(sortedAlgs)\n self.ert = np.array(reserts)\n self.target = res[:, 0]\n\n bestfinalfunvals = np.array([np.inf])\n for alg in sortedAlgs:\n if np.median(dictAlg[alg].finalfunvals) < np.median(bestfinalfunvals):\n bestfinalfunvals = dictAlg[alg].finalfunvals\n algbestfinalfunvals = alg\n self.bestfinalfunvals = bestfinalfunvals\n self.algbestfinalfunvals = algbestfinalfunvals", "def __init__(self,start,goal,theta_s,clearance,radius,rpm1,rpm2):\r\n #clearance = 5\r\n #radius = 10\r\n self.padding = clearance + radius\r\n self.ground_truth={}\r\n\r\n self.obstacle=[]\r\n self.rpm1=rpm1\r\n self.rpm2=rpm2\r\n self.expanded=[]\r\n\r\n self.parent=[]\r\n self.parent_orignal_data={}\r\n \r\n self.start=start\r\n #print(self.start)\r\n self.theta=theta_s\r\n self.theta_diff=30\r\n self.n=int(self.theta)\r\n self.frontier={}\r\n self.frontier[self.start[0],self.start[1],self.n]=0\r\n self.start_score=self.string(self.start[0],self.start[1],self.n)\r\n self.frontier_string=[]\r\n self.cost_togo={}\r\n self.cost_togo[self.start_score,self.n]=0\r\n self.parent_orignal_data[self.start_score]=None\r\n self.cost={}\r\n #self.cost=0\r\n self.goal=goal\r\n self.cost[self.start_score,self.n]=self.cost_togo[self.start_score,self.n]+self.h(self.start[0],self.start[1],self.theta)\r\n #self.cost[self.start_score,self.n]=self.cost_togo[self.start_score,self.n]+self.h(self.start[0],self.start[1])\r\n self.data_with_string={}\r\n self.data_with_string[self.start_score]=self.start\r\n self.current_score=\"00\"\r\n self.i=1\r\n self.theta_diff=30\r\n #self.cost={}\r\n #self.cost[self.start_score]=0\r\n self.dt=0.2\r\n self.threshold=1\r\n self.maximum_size=999\r\n self.parent_pos=(self.start[1],self.maximum_size-self.start[0])\r\n self.image_p=np.zeros([int(floor((self.maximum_size+1))),int(floor((self.maximum_size+1))),(360)])\r\n self.action_rpm=[[0,rpm1],[rpm1,0],[rpm1,rpm1],[0,rpm2],[rpm2,0],[rpm2,rpm2],[rpm1,rpm2],[rpm2,rpm1]]\r\n self.action_index={}", "def main():\n\n config = 
read_json_file(CONFIG_FILE)\n posititve_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"POSITIVE_FILENAME\"]\n )\n negative_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"NEGATIVE_FILENAME\"]\n )\n complexity_factor = config[\"main\"][\"COMPLEXITY_FACTOR\"]\n max_sequences_to_fit_pos = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_POS\"]\n max_sequences_to_fit_neg = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_NEG\"]\n\n input_organisms_path = config[\"main\"][\"INPUT_FILENAME\"]\n mean_nodes = 3.0\n mean_fitness = 150\n positive_dataset = read_fasta_file(posititve_path)\n positive_dataset.sort()\n negative_dataset = read_fasta_file(negative_path)\n print(\"{} {}\".format(len(positive_dataset), len(negative_dataset)))\n\n organism_factory = OrganismFactory(\n config[\"organism\"],\n config[\"organismFactory\"],\n config[\"connector\"],\n config[\"pssm\"],\n )\n\n a_organisms = organism_factory.import_organisms(input_organisms_path)\n # random.shuffle(negativeDataset)\n\n for org in a_organisms:\n\n # org.print()\n nodes = org.count_nodes()\n\n p_1 = org.get_seq_set_fitness(\n positive_dataset[:max_sequences_to_fit_pos]\n )\n n_1 = org.get_seq_set_fitness(\n negative_dataset[:max_sequences_to_fit_neg]\n )\n # p1 = 20\n # n1 = org.getSeqSetFitness(negativeDataset[31:32])\n c_1 = org.get_complexity(mean_nodes, mean_fitness)\n\n # Score\n fitness = p_1 - n_1\n effective_fitness = fitness - complexity_factor * c_1\n print(\n (\n \"ORG {} N: {:.2f} P: {:.2f} N: {:.2f} C: {:.2f} F: {:.2f}\"\n + \" EF: {:.2f}\\n\"\n ).format(org._id, nodes, p_1, n_1, c_1, fitness, effective_fitness)\n )\n\n export_organism(\n org,\n positive_dataset,\n \"{}positive_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )\n # exportOrganism(\n # org,\n # negativeDataset[31:32],\n # \"{}negative_{}\".format(config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org.ID),\n # organismFactory,\n # )\n\n export_organism(\n org,\n negative_dataset[:50],\n \"{}negative_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )", "def __init__(self, asa_factory: AsaFactory):\n super().__init__(asa_factory) # initialize step_in_progress flag\n self.agent, self.sampler, self.algo = asa_factory()\n self.batch_spec = self.sampler.batch_spec\n self.grad = None\n self.traj_infos = None\n self.opt_info = None", "def train_naive(): # add arguments as needed\n pass", "def main(_):\n hps = LM.get_default_hparams().parse(FLAGS.hpconfig)\n hps._set(\"num_gpus\", FLAGS.num_gpus)\n print ('*****HYPER PARAMETERS*****')\n print (hps)\n print ('**************************')\n\n vocab = Vocabulary.from_file(os.path.join(FLAGS.datadir, \"vocabulary.txt\"))\n\n if FLAGS.mode == \"train\":\n #hps.batch_size = 256\n dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"train.txt\"))\n run_train(dataset, hps, os.path.join(FLAGS.logdir, \"train\"), ps_device=\"/gpu:0\")\n elif FLAGS.mode.startswith(\"eval\"):\n data_dir = os.path.join(FLAGS.datadir, \"eval.txt\")\n #predict_model = prediction.Model('/dir/ckpt',os.path.join(FLAGS.datadir, \"vocabulary.txt\"), hps)\n\n dataset = Dataset(vocab, data_dir, deterministic=True)\n prefix_words = \"<brk>\".split()\n predict_model = predict.Model(hps, FLAGS.logdir, FLAGS.datadir)\n print ('start input')\n out = predict_model.predictnextkwords(prefix_words, FLAGS.num_sen)\n for row in out:\n print(' '.join(row) + \"\\n\")\n print(\"len_out: \" + str(len(out)))\n 
#prediction.topkwords(prefix_words, dataset, hps, FLAGS.logdir, FLAGS.mode)\n #sentence_ppl(prefix_words,dataset, hps, FLAGS.logdir, FLAGS.mode)\n #print vocab\n #dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"eval.txt\"))\n #run_eval(dataset, hps, FLAGS.logdir, FLAGS.mode, FLAGS.eval_steps)", "def process(data, cluster_criteria, method = \"PP\", \\\n min_height = 0, pixel_size = 0, \\\n relax = 0, stop = 0, \\\n verbose = True, interactive = False,\n n_jobs = 1, nsteps = 1 ):\n\n#==============================================================================#\n \"\"\"\n Initial prep of key variables\n \"\"\"\n\n self = Acorns()\n start = time.time()\n\n # User input information\n self.cluster_criteria = cluster_criteria\n\n if np.size(relax) == 1:\n self.relax = relax if (relax != 0) else -1.0\n relaxcond = True if (relax != 0) else False\n else:\n self.relax = relax\n relaxcond = True\n\n if method == \"PP\":\n self.method = 0\n elif method == \"PPV\":\n self.method = 1\n elif method == \"PPP\":\n self.method = 2\n else:\n raise ValueError('method {0:s} unknown'.format(method))\n method = str(method)\n\n # Generate some important information:\n self.minnpix_cluster = get_minnpix(self, pixel_size, self.cluster_criteria[0])\n self.min_height = min_height\n self.max_dist = get_maxdist(self, pixel_size)\n self.cluster_criteria[0] = self.max_dist\n self.min_sep = 2.*self.cluster_criteria[0]\n self.nsteps = nsteps\n # Prime the acorns information:\n # cluster_arr will be updated with the indices of new clusters\n self.cluster_arr = gen_cluster_arr(self, data, stop)\n self.clusters = {}\n self.forest = {}\n\n#==============================================================================#\n \"\"\"\n Main controlling routine for acorns\n \"\"\"\n\n # Get the unassigned data array\n find_unassigned_data(self, data, stop)\n\n # Gen KDTree\n tree = generate_kdtree(self)\n\n # Generate the unassigned data array\n unassigned_array_length = len(self.unassigned_data[0,:])\n\n count= 0.0\n if verbose:\n progress_bar = print_to_terminal(self, 0, data, count, \\\n unassigned_array_length, method)\n\n # Cycle through the unassigned array\n starthierarchy = time.time()\n for i in range(0, unassigned_array_length):\n\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n\n # Extract the current data point\n data_point = np.array(self.unassigned_data[:,i])\n # Retrieve this data point's location in the data array\n data_idx = get_data_index(self, data, data_point)\n self.cluster_arr[0,i] = int(data_idx)\n\n # Every data point begins as a new cluster\n self.cluster_idx = i\n bud_cluster = Cluster(data_point, data_idx, idx=self.cluster_idx, acorns=self)\n\n # Calculate distances between all data points\n link = get_links(self, i, i, tree, n_jobs)\n\n # Find clusters that are closely associated with the current data\n # point\n linked_clusters = find_linked_clusters(self, data, i, bud_cluster, link)\n\n if (self.method==1) & (len(linked_clusters) >= 1):\n linked_clusters = check_other_components(self, i, i, data_idx, data, linked_clusters, bud_cluster, tree, n_jobs, re=False)\n\n \"\"\"\n\n Notes\n -----\n\n Now try and merge this cluster with surrounding linked_clusters.\n From this point on there are three options for that data_point:\n\n 1. If no linked clusters are found - add the bud cluster to the\n cluster dictionary.\n 2. If a single linked cluster is found - merge the two.\n 3. 
If multiple linked clusters are found, check the validity of each\n cluster and either merge non-independent clusters or form a\n branch.\n\n This philosophy follows that of agglomerative hierarchical\n clustering techniques. The basic principle is discussed here:\n http://scikit-learn.org/stable/modules/clustering.html under\n \"2.3.6. Hierarchical Clustering\".\n\n A single link measure is used to connect clusters. The strategy is\n adapted from the general methods of:\n\n astrodendro:\n https://github.com/dendrograms/astrodendro\n Copyright (c) 2013 Thomas P. Robitaille, Chris Beaumont, Braden\n MacDonald, and Erik Rosolowsky\n quickclump:\n https://github.com/vojtech-sidorin/quickclump\n Copyright (c) 2016 Vojtech Sidorin\n\n When linking using the \"PPV\" methodology, single link measures may\n be insufficient and additional connectivity constraints are applied.\n Specifically - it is imposed that no two spectral features extracted\n from the same location can be merged into the same cluster.\n\n Additionally, an additional linking strategy is implemented which\n takes into account of the variance in the properties of the linked\n clusters (specifically those selected by the user). This is only\n implemented when trying to resolve ambiguities and is used as a way\n of establishing the \"strongest\" links when multiple spectral\n features have been detected.\n\n \"\"\"\n\n if not linked_clusters:\n add_to_cluster_dictionary(self, bud_cluster)\n elif len(linked_clusters) == 1:\n merge_into_cluster(self, data, linked_clusters[0], bud_cluster)\n else:\n resolve_ambiguity(self, data, linked_clusters, bud_cluster)\n\n if verbose:\n progress_bar.progress = 100\n progress_bar.show_progress()\n print('')\n print('')\n\n # Remove insignificant clusters from the clusters dictionary and update\n # the unassigned array\n cluster_list, cluster_indices = update_clusters(self, data)\n\n # Take a second pass at the data without relaxing the linking criteria\n # to pick up any remaining stragglers not linked during the first pass\n if (np.size(self.unassigned_data_updated)>1):\n cluster_list, cluster_indices = relax_steps(self, 0, data, method, verbose, tree, n_jobs, second_pass=True)\n endhierarchy = time.time()-starthierarchy\n\n#==============================================================================#\n \"\"\"\n Secondary controlling routine for acorns implemented if the linking\n criteria are relaxed by the user\n\n \"\"\"\n\n if relaxcond and (not interactive) and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n inc = self.relax/self.nsteps\n cluster_criteria_original = cluster_criteria\n for j in range(1, self.nsteps+1):\n self.cluster_criteria = get_relaxed_cluster_criteria(j*inc, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n endrelax = time.time()-startrelax\n\n elif interactive and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n cluster_criteria_original = cluster_criteria\n #plotting.plot_scatter(self)\n stop = True\n while (not stop): #stop != False:\n self.relax = np.array(eval(input(\"Please enter relax values in list format: \")))\n print('')\n self.cluster_criteria = get_relaxed_cluster_criteria(self.relax, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n #plotting.plot_scatter(self)\n s = str(input(\"Would you like to continue? 
\"))\n print('')\n stop = s in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']\n endrelax = time.time()-startrelax\n\n else:\n startrelax = time.time()\n endrelax = time.time()-startrelax\n\n#==============================================================================#\n \"\"\"\n Tidy everything up for output\n\n \"\"\"\n\n cluster_list, cluster_indices = update_clusters(self, data)\n io.reshape_cluster_array(self, data)\n get_forest(self, verbose)\n\n end = time.time()-start\n\n if verbose:\n print('acorns took {0:0.1f} seconds for completion.'.format(end))\n print('Primary clustering took {0:0.1f} seconds for completion.'.format(endhierarchy))\n if relaxcond==True:\n print('Secondary clustering took {0:0.1f} seconds for completion.'.format(endrelax))\n print('')\n print('acorns found a total of {0} clusters.'.format(len(self.clusters)))\n print('')\n print('A total of {0} data points were used in the search.'.format(len(self.unassigned_data[0,:])))\n print('A total of {0} data points were assigned to clusters.'.format(num_links(self)))\n if (np.size(self.unassigned_data_relax)>1):\n print('A total of {0} data points remain unassigned to clusters.'.format(len(self.unassigned_data_relax[0,:])))\n else:\n print('A total of 0 data points remain unassigned to clusters.')\n print('')\n\n io.housekeeping(self)\n\n return self" ]
[ "0.5801649", "0.57721746", "0.57621926", "0.5752019", "0.570017", "0.5656524", "0.5650794", "0.558717", "0.55626065", "0.5558151", "0.55303484", "0.55301005", "0.5512373", "0.55113596", "0.5471883", "0.5464631", "0.54539907", "0.54456383", "0.5424106", "0.54218847", "0.54159504", "0.5405871", "0.5403188", "0.5399336", "0.53667796", "0.5360164", "0.5359057", "0.53580064", "0.5353712", "0.5346713", "0.5345591", "0.53440684", "0.5338678", "0.5337696", "0.53310436", "0.53229123", "0.5314444", "0.5312951", "0.5310848", "0.5304485", "0.5299368", "0.5293232", "0.5293068", "0.52915543", "0.52888244", "0.52848333", "0.52848035", "0.5277661", "0.5268267", "0.5260446", "0.52528673", "0.5249518", "0.52394783", "0.52311295", "0.52263975", "0.52215534", "0.52215534", "0.52215534", "0.52195984", "0.5216274", "0.52124816", "0.520839", "0.52041763", "0.52025324", "0.5198382", "0.5195485", "0.51944155", "0.51944155", "0.5192368", "0.5190939", "0.5190836", "0.5189709", "0.51885617", "0.5186252", "0.5182991", "0.51780164", "0.5177961", "0.5174808", "0.51725763", "0.5162978", "0.5157381", "0.5156736", "0.51547694", "0.5154097", "0.5152245", "0.5141969", "0.5140261", "0.5138444", "0.51373225", "0.5134998", "0.5130033", "0.51300305", "0.512527", "0.51232", "0.5121959", "0.5121112", "0.51206845", "0.51179", "0.51169324", "0.5116366" ]
0.7114801
0
som and bmu_ind depending on the lattice "hexa" or "rect" we have different grid distance functions. bmu_ind is a number between 0 and number of nodes-1. depending on the map size bmu_coord will be calculated and then distance matrix in the map will be returned
def grid_dist(self,bmu_ind):
    try:
        lattice = getattr(self, 'lattice')
    except:
        lattice = 'hexa'
        print 'lattice not found! Lattice as hexa was set'

    if lattice == 'rect':
        return rect_dist(self,bmu_ind)
    elif lattice == 'hexa':
        try:
            msize = getattr(self, 'mapsize')
            rows = msize[0]
            cols = msize[1]
        except:
            rows = 0.
            cols = 0.
            pass

        #needs to be implemented
        print 'to be implemented' , rows , cols
        return np.zeros((rows,cols))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_nbh_distance_weight_matrix(\n self, neighborhood_func: float, bmu_pos: Tuple[int, int]\n ) -> np.ndarray:\n dist_mat = np.linalg.norm(self.node_list_ - bmu_pos, axis=1)\n\n pseudogaussian = np.exp(\n -np.divide(\n np.power(dist_mat, 2), (2 * np.power(neighborhood_func, 2))\n )\n )\n\n if self.nbh_dist_weight_mode == \"pseudo-gaussian\":\n return pseudogaussian.reshape((self.n_rows, self.n_columns, 1))\n\n if self.nbh_dist_weight_mode == \"mexican-hat\":\n mexicanhat = np.multiply(\n pseudogaussian,\n np.subtract(\n 1,\n np.divide(\n np.power(dist_mat, 2), np.power(neighborhood_func, 2)\n ),\n ),\n )\n return mexicanhat.reshape((self.n_rows, self.n_columns, 1))\n\n raise ValueError(\n \"Invalid nbh_dist_weight_mode: \" + str(self.nbh_dist_weight_mode)\n )", "def _get_nbh_distance_weight_block(\n self, nbh_func: float, bmus: List[Tuple[int, int]]\n ) -> np.ndarray:\n dist_weight_block = np.zeros((len(bmus), self.n_rows, self.n_columns))\n\n for i, bmu_pos in enumerate(bmus):\n dist_weight_block[i] = self._get_nbh_distance_weight_matrix(\n nbh_func, bmu_pos\n ).reshape((self.n_rows, self.n_columns))\n\n return dist_weight_block", "def find_bmu(t, net, m):\n #inicializa o index\n bmu_index = np.array([0,0])\n #inicia distancia minima para um numero bem grande\n min_dist = np.iinfo(np.int).max\n #anda pela matriz de pesos e procura menor distancia do vetor t\n for x in range(net.shape[0]):\n for y in range(net.shape[1]):\n #pesos atuais que estou considerando\n w = net[x, y, :].reshape(m,1) #transforma matriz em vetor 3D\n #calcula distancia euclidiana ao quadrado (evita tirar raiz)\n sq_dist = np.sum((w - t) ** 2) #soma as diferencas ao quadrado de cada valor do vetor\n if sq_dist < min_dist: #se distancia eh menor salva valor e index\n min_dist = sq_dist\n bmu_index = np.array([x,y])\n\n #depois de percorrer a matriz tenho a menor distancia e o index do vetor BMU\n #pega vetor dentro do net\n bmu = net[bmu_index[0], bmu_index[1], :].reshape(m,1)\n #retorna o bmu e o indice\n return (bmu, bmu_index)", "def map_vects(self, input_vects):\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n \n distances = []\n \n \n contador_adyacentes = 0\n \n matriz = np.array(list(self._neuron_locations(self._m, self._n)))\n \n m = self._m\n \n n = self._n\n \n matrices = []\n \n matrices = np.stack((matriz,\n matriz + np.array([m,n]), \n matriz - np.array([m,n]), \n matriz + np.array([m,0]),\n matriz - np.array([m,0]),\n matriz + np.array([0,n]),\n matriz - np.array([0,n]),\n matriz + np.array([m,-n]),\n matriz + np.array([-m,n])\n ))\n \n distancias_matriz = []\n\n for i in range(n*m):\n distancias_matriz.append([])\n for j in range(m*n):\n distancias_matriz[i].append(np.min(np.sum(np.power(np.subtract(matriz[i], matrices[:,j]),2), axis = 1)))\n \n distancias_matriz = np.array(distancias_matriz)\n \n \n for vect in input_vects:\n\n # min_index is the index of the BMU\n \n lista_indices = [i for i in range(len(self._weightages))]\n \n min_index = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x]))\n\n # min_index_2 is the index of the 2nd BMU\n \n lista_indices.pop(min_index) # El indice es el mismo que el valor\n \n min_index_2 = min(lista_indices,\n key=lambda x: np.linalg.norm(vect - self._weightages[x])) \n \n r2 = np.sqrt(2)\n\n if np.sqrt(distancias_matriz[min_index][min_index_2]) > r2: \n# print('loc 1')\n# print(locaciones[min_index])\n# print('loc 2')\n# print(locaciones[min_index_2])\n contador_adyacentes += 1\n\n\n 
distance = np.linalg.norm(vect - self._weightages[min_index])\n \n distances.append(distance)\n \n to_return.append(self._locations[min_index]) \n \n # Quantization Error qe (the mean of all distances to the BMU)!\n self.distances = distances \n \n # Topographic error te\n self.proporcion = contador_adyacentes / len(input_vects)\n \n self.prom_dist = np.mean(self.distances)\n \n return to_return", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def BM2BSM(xy, NL, KL, BM0):\n # Check if 3D or 2D\n # np.sqrt( (xy[NL[i,0],0]-xy[BL[:,1],0])**2+(xy[BL[:,0],1]-xy[BL[:,1],1])**2) ]\n '''this isn't finished....'''", "def 
_calc_u_matrix_distances(self) -> None:\n for u_node in itertools.product(\n range(self.n_rows * 2 - 1), range(self.n_columns * 2 - 1)\n ):\n # neighbor vector\n nb = (0, 0)\n\n if not (u_node[0] % 2) and (u_node[1] % 2):\n # mean horizontally\n nb = (0, 1)\n\n elif (u_node[0] % 2) and not (u_node[1] % 2):\n # mean vertically\n nb = (1, 0)\n\n self.u_matrix[u_node] = np.linalg.norm(\n self.unsuper_som_[u_node[0] // 2][u_node[1] // 2]\n - self.unsuper_som_[u_node[0] // 2 + nb[0]][\n u_node[1] // 2 + nb[1]\n ],\n axis=0,\n )", "def distance_map(self, scaling='sum'):\n\n if scaling not in ['sum', 'mean']:\n raise ValueError(f'scaling should be either \"sum\" or \"mean\" ('\n f'\"{scaling}\" not valid)')\n\n um = nan * zeros((self._weights.shape[0],\n self._weights.shape[1],\n 8)) # 2 spots more for hexagonal topology\n\n ii = [[0, -1, -1, -1, 0, 1, 1, 1]]*2\n jj = [[-1, -1, 0, 1, 1, 1, 0, -1]]*2\n\n if self.topology == 'hexagonal':\n ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n for x in range(self._weights.shape[0]):\n for y in range(self._weights.shape[1]):\n w_2 = self._weights[x, y]\n e = y % 2 == 0 # only used on hexagonal topology\n for k, (i, j) in enumerate(zip(ii[e], jj[e])):\n if (x+i >= 0 and x+i < self._weights.shape[0] and\n y+j >= 0 and y+j < self._weights.shape[1]):\n w_1 = self._weights[x+i, y+j]\n um[x, y, k] = fast_norm(w_2-w_1)\n\n if scaling == 'mean':\n um = nanmean(um, axis=2)\n if scaling == 'sum':\n um = nansum(um, axis=2)\n\n return um/um.max()", "def computeB(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz = True\n \n def computeCMBY(d0):\n \"\"\"\n For CMB, y = S^1/2 A N^-1 d, where S is CMB signal covariance matrix (Cl's)\n \"\"\"\n # N.B. 
Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2\n \n def computeClusterY(d0):\n \"\"\"\n For cluster, y = F^T A^T N^-1 d, where F is TSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[0][ic][freq] * g_nu[freq])\n return d2\n \n def computeClusterKSZY(d0):\n \"\"\"\n For cluster, y = K^T A^T N^-1 d, where K is KSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2\n \n def computeMonopoleY(d0):\n \"\"\"\n Overall monopole amplitude.\n \"\"\"\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2 += numpy.sum(d1 * ninvs[freq])\n return(d2)\n \n \n # CMB realisation; convolve white noise map with beam and multiply by \n # signal covmat S^1/2 in harmonic space\n b0 = numpy.random.randn(ny,nx)\n a_l = numpy.fft.fft2(b0, b0.shape)\n a_l *= precond_2d * power_2d**(-0.5)\n b0 = numpy.fft.irfft2(a_l, b0.shape)\n \n # Calculate per-band noise realisation.\n # Multiply by pixel-space N^1/2, convolve with beam, and sum over \n # cluster pixels to get RHS\n b1 = 0; b4 = 0\n b2 = numpy.zeros(nCluster)\n if ksz: b3 = numpy.zeros(nCluster)\n \n for freq in range(nFreq):\n _b = numpy.random.randn(ny,nx) * ninvs[freq]**0.5\n a_l = numpy.fft.fft2(_b) * beams[freq] * precond_2d\n b1 += numpy.fft.irfft2(a_l, _b.shape)\n b4 += numpy.sum(_b)\n for ic in range(nCluster):\n b2[ic] += numpy.sum( _b * g_nu[freq] * clumaps[0][ic][freq] )\n if ksz: b3[ic] += numpy.sum( _b * clumaps[1][ic][freq] )\n\n b0 = numpy.reshape(b0,(nx*ny))\n b1 = numpy.reshape(b1,(nx*ny))\n \n\n # Compute CMB and cluster data parts of b\n b_CMB = computeCMBY(datamaps) + b0 + b1\n b_mono = computeMonopoleY(datamaps) + b4\n b_tsz = computeClusterY(datamaps) + b2\n if ksz: b_ksz = computeClusterKSZY(datamaps) + b3\n \n # Return total b vector (Ncmbpix + 1 + (1|2)*Ncluster elements in vector)\n b = numpy.append(b_CMB, b_mono)\n b = numpy.append(b, b_tsz)\n if ksz: b = numpy.append(b, b_ksz)\n return b", "def SALT2_MMDist(numSN,\n cm=-0.0474801042369, cs1=0.0965032273527, cs2=0.042844366359,\n x1m=0.872727291354, x1s1=0.358731835038, x1s2=1.42806797468,\n mm=10.701690617, ms1=0.334359086569, ms2=1.0750402101,\n mBm=-19.0199168813, mc=-0.0838387899933, mt=10.,\n cc=3.20907949118, cx1=-0.137042055737):\n color = double_gauss(cm, cs1, cs2, size=numSN)\n x1 = double_gauss(x1m, x1s1, x1s2, size=numSN)\n mass = double_gauss(mm, ms1, ms2, size=numSN)\n\n mB = mBm + mc * (mass > 10.) 
+ cc * color + cx1 * x1\n\n return mB, x1, color, mass", "def make_mol_kernel(drugs):\n\n dict_drug = drugs.dict_drug\n dict_ind2mol = drugs.dict_ind2mol\n\n # get the ECFP fingerprints\n nb_mol = drugs.nb\n X_fingerprint = np.zeros((nb_mol, 1024), dtype=np.int32)\n list_fingerprint = []\n # for i in list(dict_ind2mol.keys()):\n for i in range(nb_mol):\n dbid = dict_ind2mol[i]\n m = Chem.MolFromSmiles(dict_drug[dbid])\n list_fingerprint.append(AllChem.GetMorganFingerprint(m, 2))\n arr = np.zeros((1,))\n DataStructs.ConvertToNumpyArray(\n AllChem.GetMorganFingerprintAsBitVect(m, \n 2, \n nBits=1024), \n arr)\n X_fingerprint[i, :] = arr\n\n # get the Tanimoto Similarity Matrix\n K = np.zeros((len(list_fingerprint), len(list_fingerprint)))\n for i in range(len(list_fingerprint)):\n for j in range(i, len(list_fingerprint)):\n K[i, j] = DataStructs.TanimotoSimilarity(list_fingerprint[i], \n list_fingerprint[j])\n K[j, i] = K[i, j]\n\n return X_fingerprint, K", "def get_center_of_mass_allies(self,obs):", "def get_bmu(\n self, datapoint: np.ndarray, som_array: np.ndarray\n ) -> Tuple[int, int]:\n a = self._get_node_distance_matrix(\n datapoint.astype(np.float64), som_array\n )\n\n return np.argwhere(a == np.min(a))[0]", "def mi_from_dm(distance_matrix, ns, nh, spike_train_list=None):\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n \n if spike_train_list is not None:\n\n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n\n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 0\n \n if i not in members_of_glob:\n for j in nearest_neighbours[i]:\n if j not in members_of_glob:\n if spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1 # count neigbours out of glob\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*f_i # if one neighbour is in glob, all following neighb are as well\n break\n counts.append(c_i)\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += 1 + (nh - 1)*f_i #If in glob, take fraction of remaining neighbours except you\n counts.append(c_i)\n \n counts = np.array(counts)\n \n else:\n \n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 1\n for j in nearest_neighbours[i]:\n if (i != j and abs(i - j)%ns==0 ):\n c_i += 1 \n counts.append(c_i)\n counts = np.array(counts) \n \n I = sum(np.log2(counts*ns/float(nh))) / float(nr)\n\n return I", "def get_center_of_mass_enemies(self,obs):", "def mi_from_dm_alt(distance_matrix, ns, nh, spike_train_list=None):\n \n #print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n #print \"finished sorting\"\n #return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n \n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really 
happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I", "def get_gaussian_maps_2d(mu, sigma, shape_hw, mode='rot'):\n with tf.name_scope(None, 'gauss_map', [mu]):\n\n y = tf.cast(tf.linspace(-1.0, 1.0, shape_hw[0]), tf.float64)\n x = tf.cast(tf.linspace(-1.0, 1.0, shape_hw[1]), tf.float64)\n\n [x,y] = tf.meshgrid(x,y)\n xy = tf.stack([x, y], axis=-1)\n xy = tf.stack([xy] * nb_landmarks, axis=0)\n xy = tf.reshape(xy, [1, nb_landmarks, shape_hw[0], shape_hw[1], 2])\n mu = tf.reshape(mu, [-1, nb_landmarks, 1, 1, 2])\n invsigma = tf.linalg.inv(sigma)\n invsigma = tf.reshape(invsigma, [-1, nb_landmarks, 1, 2, 2])\n pp = tf.tile(invsigma, [1, 1, shape_hw[1], 1, 1])\n X = xy-mu\n dist = tf.matmul(X,pp)\n dist = tf.reduce_sum((dist*X), axis=-1)\n\n g_yx = tf.exp(-dist)\n\n g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])\n\n return g_yx", "def calc_synLocations(post_branches, n_syns, dist):\n\n\t\t\t\tassert dist in ['uniform', 'random', 'one'], 'Which synapse distribution for %s population? (uniform/random/one) '%self.population_name\n\t\t\t\t\n\t\t\t\tn_branches = len(post_branches)\n\t\t\t\tbranch_locs = {}\n\t\t\t\t\n\t\t\t\tif dist == 'uniform':\n\t\t\t\t\traise Exception('uniform', '{} dist is under construction!'.format(dist))\n\t\t\t\t\t# density = n_syns / L\n\t\t\t\t\t# locs = sorted(np.arange(0, L, 1/density))\n\t\t\t\t\t# locs = [i/L for i in locs]\n\n\t\t\t\t\t# assert len(locs)==n_syns, ['Sanity check warning: unexpected locs length!', pdb.set_trace()]\n\n\t\t\t\telif dist == 'random':\n\t\t\t\t\t\n\t\t\t\t\tfor i in range(n_syns):\n\n\t\t\t\t\t\t# Randomly choose branch\n\t\t\t\t\t\trand_branch_idx = np.random.randint(n_branches)\n\t\t\t\t\t\trand_branch \t = post_branches[rand_branch_idx]\n\t\t\t\t\t\trand_branch_name = rand_branch.name().split('].')[-1]\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Randomly choose location\n\t\t\t\t\t\trand_loc = np.random.rand()\n\n\t\t\t\t\t\tif rand_branch_name in branch_locs.keys():\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['locs'].append(rand_loc)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name] \t\t\t\t= {}\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['locs'] \t\t= [rand_loc]\n\t\t\t\t\t\t\tbranch_locs[rand_branch_name]['branch_obj'] = rand_branch\t\t\t\t\t\t\t\t\n\n\t\t\t\t\tfor key in branch_locs:\n\t\t\t\t\t\tbranch_locs[key]['locs'] = sorted(branch_locs[key]['locs'])\n\t\t\t\t\n\t\t\t\telif dist == 'one':\n\t\t\t\t\tsingle_branch_idx \t= np.random.randint(n_branches)\n\t\t\t\t\tsingle_branch \t \t= post_branches[single_branch_idx]\n\t\t\t\t\tsingle_branch_name \t= single_branch.name().split('].')[-1]\n\t\t\t\t\t\n\t\t\t\t\tbranch_locs[single_branch_name] = {'branch_obj': single_branch, 'locs': [0.5]*n_syns}\n\n\t\t\t\treturn branch_locs", "def return_BMU_coord(self, sess, 
input_array):\n output = sess.run([self.distance_matrix,self.distance_argmin], feed_dict={self.input_placeholder: input_array})\n index = output[1] #flatten index\n row = index/self.tot_cols\n col = index - (row*self.tot_cols)\n return index, (row,col)", "def find_halos(pos, ngrid, log, level=3000):\n print('Binning particles', file=log)\n cells = get_cells(pos, ngrid, log)\n count = bincount(cells, minlength=ngrid**3)\n count.shape = (ngrid,ngrid,ngrid)\n print('Count in', count.min(), count.max(), file=log)\n idx = flatnonzero(count>level)\n print('Number of cells above', level, 'is', len(idx), file=log)\n \n \n labels, num_features = ndimage.label(count>level)\n print('Number fo features', num_features, file=log)\n print('Labels in', labels.min(), labels.max(), file=log)\n locations = ndimage.find_objects(labels)\n\n dense_regions = []\n\n for i in range(num_features):\n loc = locations[i]\n hw = max(l.stop - l.start for l in loc) * 0.5 /ngrid\n hw_padded = hw + 0.0/ngrid\n\n ctr =[(0.5/ngrid)*(l.stop + l.start) for l in loc]\n count_i = count[loc][labels[loc]==(i+1)].sum()\n print('Count', count_i, file=log)\n dense_regions.append((count_i, ctr, hw_padded))\n\n # sort by number of particles in the region\n dense_regions = sorted(dense_regions, key = lambda num_ctr_hw :num_ctr_hw[0], reverse=True)\n\n return dense_regions", "def mi_from_dm_alt_hq(distance_matrix, ns, nh, spike_train_list=None):\n \n print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n #nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n nearest_neighbours = np.array([np.array(hq.nsmallest(nh, r)) for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n print \"finished sorting\"\n return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I", "def get_gaussian_maps(mu, sigmax, sigmay, covs, shape_hw, mode='rot'):\n with tf.name_scope(None, 'gauss_map', [mu]):\n # mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]\n\n y = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[0]))\n\n x = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[1]))\n [x,y] = 
tf.meshgrid(x,y)\n xy = tf.stack([x, y], axis=-1)\n xy = tf.stack([xy] * nb_landmarks, axis=0)\n xy = xy[None, : ,:, :, :]\n if mode in ['rot', 'flat']:\n mu = mu[:,:,None, None,:]\n\n invsigma = tf.stack([sigmay**2, -covs, -covs, sigmax**2], axis=-1)\n invsigma = tf.reshape(invsigma, [-1, nb_landmarks, 2,2])\n denominator = (sigmax*sigmay)**2 - covs**2\n denominator = tf.expand_dims(tf.expand_dims(denominator, -1), -1)\n invsigma = invsigma/(denominator+1e-7)\n invsigma = tf.cast(invsigma, tf.float32)\n pp = tf.tile(invsigma[:, :, None, :, :], [1, 1, shape_hw[1], 1, 1])\n X = xy-mu\n dist = tf.matmul(X,pp)\n dist = tf.reduce_sum((dist*X), axis=-1)\n\n\n if mode == 'rot':\n g_yx = tf.exp(-dist)\n else:\n g_yx = tf.exp(-tf.pow(dist + 1e-5, 0.25))\n\n else:\n raise ValueError('Unknown mode: ' + str(mode))\n\n g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])\n return g_yx", "def get_mi(x, y, k=1, normalize=None, norm=np.inf, estimator='ksg'):\n\n if normalize:\n x = normalize(x)\n y = normalize(y)\n\n # construct state array for the joint process:\n xy = np.c_[x,y]\n\n if estimator == 'naive':\n # compute individual entropies\n hx = get_h(x, k=k, norm=norm)\n hy = get_h(y, k=k, norm=norm)\n hxy = get_h(xy, k=k, norm=norm)\n\n # compute mi\n mi = hx + hy - hxy\n\n elif estimator == 'ksg':\n\n # store data pts in kd-trees for efficient nearest neighbour computations\n # TODO: choose a better leaf size\n x_tree = cKDTree(x)\n y_tree = cKDTree(y)\n xy_tree = cKDTree(xy)\n\n # kth nearest neighbour distances for every state\n # query with k=k+1 to return the nearest neighbour, not counting the data point itself\n # dist, idx = xy_tree.query(xy, k=k+1, p=norm)\n dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)\n epsilon = dist[:, -1]\n\n # for each point, count the number of neighbours\n # whose distance in the x-subspace is strictly < epsilon\n # repeat for the y subspace\n n = len(x)\n nx = np.empty(n, dtype=np.int)\n ny = np.empty(n, dtype=np.int)\n for ii in range(n):\n # nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n # ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=norm)) - 1\n nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1\n\n mi = digamma(k) - np.mean(digamma(nx+1) + digamma(ny+1)) + digamma(n) # version (1)\n # mi = digamma(k) -1./k -np.mean(digamma(nx) + digamma(ny)) + digamma(n) # version (2)\n\n elif estimator == 'lnc':\n # TODO: (only if you can find some decent explanation on how to set alpha!)\n raise NotImplementedError(\"Estimator is one of 'naive', 'ksg'; currently: {}\".format(estimator))\n\n else:\n raise NotImplementedError(\"Estimator is one of 'naive', 'ksg'; currently: {}\".format(estimator))\n\n return mi", "def generate_map(seed):\n def set_biomes(field, points):\n for row in range(len(field)):\n # For every cell, we find the closest point\n for cell in range(len(field[row])):\n # Store the currently closest point:\n shortest_dist = -1\n # Stores the biome of the current point:\n current_biome = '_'\n\n # Iterate over the points to find the closest one\n for point in points:\n # Calculate the euclidean distance\n xdiff = point[0] - row\n ydiff = point[1] - cell\n distance = xdiff * xdiff + ydiff * ydiff # Square root not needed since we're only comparing\n\n # If this is currently the shortest distance, set it\n if distance < shortest_dist or shortest_dist == -1:\n shortest_dist = distance\n # Set 
the biome that will be chosen if a shorter distance isn't found\n current_biome = point[2]\n\n # Select a random field in the biome, taking rarity into account\n\n # Get names/data of all fields in the chosen biome\n biome_fields = biomes[current_biome]['fields'].items()\n # Extract field names and their rarities (weights)\n field_data = [(name, data['rarity']) for name, data in biome_fields]\n # Choose a random field using the weights\n field_index = weighted_choice([field_weight[1] for field_weight in field_data])\n # Set the cell's field\n field[row][cell] = field_data[field_index][0]\n\n return field\n\n def poisson_disc_samples(width, height, r, k=5):\n \"\"\"\n \"Two-dimensional Poisson Disc Sampling using Robert Bridson's algorithm.\"\n Modified version of https://github.com/emulbreh/bridson.\n \"\"\"\n tau = 2 * pi\n cellsize = r / sqrt(2)\n\n grid_width = int(ceil(width / cellsize))\n grid_height = int(ceil(height / cellsize))\n grid = [None] * (grid_width * grid_height)\n\n def distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n return sqrt(dx * dx + dy * dy)\n\n def grid_coords(p2):\n return [int(floor(p2[0] / cellsize)), int(floor(p2[1] / cellsize))]\n\n def fits(p2, gx, gy):\n yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height)))\n\n for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):\n for y in yrange:\n g = grid[x + y * grid_width]\n if g is None:\n continue\n if distance(p2, g) <= r:\n return False\n return True\n\n p = [width * rnd.random(), height * rnd.random()]\n queue = [p]\n grid_x, grid_y = grid_coords(p)\n grid[grid_x + grid_y * grid_width] = p\n\n while queue:\n qi = int(rnd.random() * len(queue))\n qx, qy = queue[qi]\n queue[qi] = queue[-1]\n queue.pop()\n\n for _ in range(k):\n alpha = tau * rnd.random()\n d = r * sqrt(3 * rnd.random() + 1)\n px = qx + d * cos(alpha)\n py = qy + d * sin(alpha)\n\n if not (0 <= px < width and 0 <= py < height):\n continue\n p = [px, py]\n grid_x, grid_y = grid_coords(p)\n\n if not fits(p, grid_x, grid_y):\n continue\n queue.append(p)\n grid[grid_x + grid_y * grid_width] = p\n return [p for p in grid if p is not None]\n\n # Define map dimensions and settings\n # Size should be at least 35x35\n\n height = 50\n width = 50\n\n # Create a new instance of Random() using a given seed\n\n rnd = random.Random(seed)\n\n # Generate a random starting location somewhere in the middle of the map\n\n x = rnd.randint(width - 10, width + 10)\n y = rnd.randint(height - 10, height + 10)\n\n # Create a 2-dimensional list for the game map\n\n field = [['_'] * width for _ in range(height)]\n\n # Create random points that will be the starting positions of biomes\n\n points = poisson_disc_samples(width, height, 3, 5)\n rnd.shuffle(points)\n\n for i in range(len(points)):\n biome = rnd.choice(list(biomes.keys())) # Set a random biome\n\n points[i][0] = int(round(points[i][0])) - 1 # x\n points[i][1] = int(round(points[i][1])) - 1 # y\n points[i].append(biome)\n\n field[points[i][1]][points[i][0]] = 'X' # not needed?\n\n # Set the biomes\n\n field = set_biomes(field, points)\n\n return ({\n 'field': field,\n 'x': x,\n 'y': y\n })", "def getLocalMap(dist_compl):\n sdc=dist_compl*RES\n #clms are real ;)\n #rws are imaginary :D #rows\n map_padd = 1*RES #add a meter\n rws_ofs = abs(sdc.imag.min())+map_padd #offsetX\n rws = abs(sdc.imag.max())+(rws_ofs)\n clms_ofs = abs(sdc.real.min())+map_padd\n clms = abs(sdc.real.max())+(clms_ofs)\n M = 
np.zeros((np.round(rws+map_padd).astype(int),np.round(clms+map_padd).astype(int))).astype(dtype=MAP_D_TYPE)#empty local map\n Mg = M.copy()\n points = sdc + np.array([clms_ofs+1j*rws_ofs]) #scale\n #M[points.imag.astype(int),points.real.astype(int)]=10 \n for p in points:\n r=np.round(p.imag).astype(int)\n c=np.round(p.real).astype(int)\n try:\n #draw line in matrix\n lc = [np.round(rws_ofs).astype(int),np.round(clms_ofs).astype(int),r,c]\n rr, cc, val = line_aa(*lc) #not really demaning --> 1%\n M[rr, cc] = np.logical_or(M[rr,cc]>0, val>0) \n #add gaussian\n Mg[r-GPoints//2:r+GPoints//2,c-GPoints//2:c+GPoints//2]+=Gau\n except:\n print('Error: out of array when calculating the local map',r,c)\n Mg[Mg>100]=100 #cap the gaussian matrix\n car_pos_in_loc_mat = np.array([np.round(clms_ofs).astype(int), np.round(rws_ofs).astype(int)])\n #Mg[car_pos_in_loc_mat[1],car_pos_in_loc_mat[0]]=300 #add car pos\n return M*(-100)+Mg, car_pos_in_loc_mat", "def som_step(centers,data,neighbor,eta,sigma):\n size_k = int(np.sqrt(len(centers)))\n \n #find the best matching unit via the minimal distance to the datapoint\n b = np.argmin(np.sum((centers - np.resize(data, (size_k**2, data.size)))**2,1))\n\n # find coordinates of the winner\n a,b = np.nonzero(neighbor == b)\n \n # update all units\n for j in range(size_k**2):\n # find coordinates of this unit\n a1,b1 = np.nonzero(neighbor==j)\n # calculate the distance and discounting factor\n disc=gauss(np.sqrt((a-a1)**2+(b-b1)**2),[0, sigma])\n # update weights \n centers[j,:] += disc * eta * (data - centers[j,:])", "def make_indp_tiles(tf_map, nc_sum, mu_sq):\n tiles = tf_map.copy()\n # Here's the deal: we're going to keep only the valid output and\n # it's *always* going to exist in the lowest available indices\n stride = nc_sum + 1\n for i in xrange(tiles.shape[0]/stride):\n numpy.absolute(tiles[stride*i:stride*(i+1)].sum(axis=0), tiles[stride*(i+1)-1])\n\n # Do the proper normalization\n return tiles[nc_sum::nc_sum+1].real**2 / mu_sq[nc_sum::nc_sum+1].reshape(-1, 1)", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat", "def _initialize_mapbias(self):\n self.mapbias = sharedX(\n numpy.zeros(self.nmap),\n name='mb',\n borrow=True\n )", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def holstein_mpo(n_bath, d_boson, e0, eks, vks):\n\n bN = np.diag(range(d_boson))\n bp = np.diag([np.sqrt(x) for x in range(1,d_boson)],k=-1) # -1 below diagonal\n bm = np.diag([np.sqrt(x) for x in range(1,d_boson)],k= 1) # 1 above diagonal \n Sx = np.array([[0.,1.],[1.,0.]])\n Sz = np.array([[1.,0.],[0.,-1.]])\n\n Dw = 3\n df = 2\n db = d_boson\n phys_bonds = [df] + [db]*n_bath\n\n L = n_bath + 1\n \n W = []\n\n ## MPO on spin site\n WS = np.zeros([1,df,df,self.Dw])\n WS[0,:,:,0] = e0*self.ops['SX'] # site energy\n WS[0,:,:,1] = self.ops['SZ'] # creation of fermion\n WS[0,:,:,2] = np.eye(df)\n W.append(Ws)\n\n ## MPO on bath sites\n Wb = np.zeros([self.Dw,db,db,self.Dw])\n Wb[0,:,:,0] = np.eye(db)\n Wb[1,:,:,1] = np.eye(db) # propagates SZ interaction term\n Wb[2,:,:,2] = np.eye(db)\n\n for i in range(L-1): \n Wb[1,:,:,0] = 
vks[i]*self.ops['b-'] + np.conj(vks[i])*self.ops['b+'] # interaction with spin\n Wb[2,:,:,0] = eks[i]*self.ops['bn'] # energy of boson \n W.append(Wb)\n \n\n WL = np.zeros([self.Dw,db,db,1])\n WL[0,:,:,0] = np.eye(db)\n WL[1,:,:,0] = vks[-1]*self.ops['b-'] + np.conj(vks[-1])*self.ops['b+'] # interaction with spin\n WL[2,:,:,0] = eks[-1]*self.ops['bn'] # energy of boson\n W.append(WL)\n\n return W", "def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)", "def get_sigma_map(start_x = 0,field_height=100,field_width=100,viewing_distance=12.0,screen_pixel_size=0.282,debug=False):\n start_x_pixels = np.round(get_pixels_at_degrees(degrees=start_x,viewing_distance=viewing_distance,screen_pixel_size=screen_pixel_size))\n optical_nodal_distance = 17.0 # mm from lens to fovea\n viewing_distance_inches = viewing_distance\n viewing_distance = viewing_distance * 25.4 # mm\n center_y, center_x = 0,0\n x_coords = (start_x_pixels + np.arange(-field_width/2.0,field_width/2,1))*screen_pixel_size\n y_coords = np.arange(-field_height/2.0,field_height/2,1)*screen_pixel_size\n x,y = np.meshgrid(x_coords,y_coords)\n coords = np.vstack((y.ravel(),x.ravel())).T\n\n image_dist = cdist(np.matrix([center_y,center_x]),coords)\n fovea_dist = (np.pi/180.0)*optical_nodal_distance*get_degrees_at_pixels(pixels=image_dist/screen_pixel_size,viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n midget_dendritic_field_diameter_micrometers = 8.64 * np.power(fovea_dist,1.04) # midget from Dacey and Peterson, 1994\n midget_dendritic_field_diameter_millimeters = midget_dendritic_field_diameter_micrometers/1000.0\n midget_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n\n midget_sigma_map = midget_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n midget_sigma_map = midget_sigma_map.reshape((field_height,field_width))\n\n parasol_dendritic_field_diameter_micrometers = 70.2 * np.power(fovea_dist,0.65) # parasol from Dacey and Peterson, 1994\n parasol_dendritic_field_diameter_millimeters = parasol_dendritic_field_diameter_micrometers/1000.0\n parasol_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n parasol_sigma_map = 
parasol_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n parasol_sigma_map = parasol_sigma_map.reshape((field_height,field_width))\n\n return midget_sigma_map,parasol_sigma_map", "def alignMono(entry,prec=1E-4,seed_index=0,supercell=2,\n c_mag=50,dist_from_plane=3):\n\n\n\n\n new_latt,fit_fracs_both= getNewLattice(entry,dim=2,prec=prec,\n seed_index=seed_index,\n supercell=supercell,c_mag=c_mag)\n\n fit_fracs = np.array([np.array(x)+[0,0,.5] for x in fit_fracs_both[0]])\n final_sites = np.dot(new_latt.T,fit_fracs.T).T\n # Create new lattice matricies\n lat1 = np.array([new_latt[0],new_latt[1],new_latt[2]])\n lat2 = np.array([new_latt[1],new_latt[0],new_latt[2]])\n\n # Generate atomic fractions\n new_fracs1 = np.linalg.solve(lat1.T,np.array(final_sites).T).T\n new_fracs2 = np.linalg.solve(lat2.T,np.array(final_sites).T).T\n species = fit_fracs_both[1]\n return([species,new_fracs1,lat1],[species,new_fracs2,lat2])", "def find_densest(m, w_ncols, w_nrows):\n # Implement your solution here.\n pass", "def nlm_fast(data,FS,BS,sigma,dev = None, proc = None):\n\n if dev is None:\n dev = imgtools.__DEFAULT_OPENCL_DEVICE__\n\n if dev is None:\n raise ValueError(\"no OpenCLDevice found...\")\n\n if proc is None:\n proc = OCLProcessor(dev,absPath(\"kernels/nlm_fast.cl\"),options=\"-D FS=%i -D BS=%i\"%(FS,BS))\n\n img = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg2 = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n\n accBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n weightBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n\n dev.writeImage(img,data);\n dev.writeBuffer(weightBuf,np.zeros_like(data,dtype=np.float32));\n\n for dx in range(BS+1):\n for dy in range(-BS,BS+1):\n proc.runKernel(\"dist\",img.shape,None,img,tmpImg,np.int32(dx),np.int32(dy))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg,tmpImg2,np.int32(1))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg2,distImg,np.int32(2))\n\n proc.runKernel(\"computePlus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n if any([dx,dy]):\n proc.runKernel(\"computeMinus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n acc = dev.readBuffer(accBuf,dtype=np.float32).reshape(data.shape)\n weights = dev.readBuffer(weightBuf,dtype=np.float32).reshape(data.shape)\n\n return acc/weights", "def test_uv_degrid_gaussian_kernel():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(\n max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=\"gaussian\"\n )\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 1.295932713086053e-05", "def slabldos(h,energies=np.linspace(-1.0,1.0,40),delta=None,nk=40):\n if h.dimensionality!=2: raise # nope\n ds = ldosmap(h,energies=energies,delta=delta,nk=nk)\n if len(ds[0])!=len(h.geometry.z): \n print(\"Wrong dimensions\",len(ds[0]),len(h.geometry.z))\n raise\n f = open(\"DOSMAP.OUT\",\"w\")\n f.write(\"# energy, index, DOS, 
zposition\\n\")\n for ie in range(len(energies)):\n for iz in range(len(h.geometry.z)):\n f.write(str(energies[ie])+\" \")\n f.write(str(iz)+\" \")\n f.write(str(ds[ie,iz])+\" \")\n f.write(str(h.geometry.z[iz])+\"\\n\")\n f.close()\n return energies,np.transpose(ds) # retunr LDOS ", "def applyMat(my_map, linsys_setup):\n \n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n \n \n\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n\n #Always apply beam * precond\n beam_prec=[]\n\n for f in range(nFreq):\n beam_prec+=[beams[f][:,:ny/2+1]*precond_2d[:,:ny/2+1]]\n precond_2d=precond_2d[:,:ny/2+1]\n power_2d=power_2d[:,:ny/2+1]\n \n ksz = False\n if len(clumaps) == 2: ksz = True\n \n # Routines to perform block matrix multiplication defined in Eriksen Eq. 19\n \n def apply_cmb_cmb(d0):\n \"\"\"\n Apply (S^-1 + A N^-1 A) x\n \"\"\"\n d1 = d0.copy()\n d1 = numpy.reshape(d1,(nx,ny))\n a_l = fft.rfft(d1,axes=[-2,-1])\n \n c_l = 0\n for f in range(nFreq):\n\n b_l = a_l * beam_prec[f]\n d2 = fft.irfft(b_l,axes=[-2,-1],normalize=True)\n d2 *= ninvs[f]\n b_l = fft.rfft(d2,axes=[-2,-1])\n c_l += b_l * beam_prec[f]\n \n d2 = fft.irfft(c_l,axes=[-2,-1],normalize=True)\n d1 = fft.irfft(precond_2d**2 * a_l/power_2d,axes=[-2,-1],normalize=True)\n \n d2 += d1\n \n return d2.reshape((nx*ny,))\n \n \"\"\"\n def apply_tsz_tsz(d0): # DONE\n \\\"\"\"\n Apply (F^T A^T N^-1 A F) x\n \\\"\"\"\n d1 = numpy.zeros(nCluster)\n mat = numpy.zeros((nCluster, nCluster))\n # TODO: This could probably be more efficient (e.g. using np.outer)\n for freq in range(nFreq):\n for ic in range(nCluster):\n for jc in range(0, ic+1):\n mat[ic,jc] = numpy.sum( ninvs[freq] * g_nu[freq]**2. \\\n * clumaps[0][ic][freq] * clumaps[0][jc][freq] )\n if ic != jc: mat[jc,ic] = mat[ic,jc]\n d1 += numpy.dot(mat, d0)\n return d1\n \n def apply_ksz_ksz(d0): # DONE\n \\\"\"\"\n Apply (K^T A^T N^-1 A K) x\n \\\"\"\"\n # FIXME: Missing factor of ivcov\n d1 = numpy.zeros(nCluster)\n mat = numpy.zeros((nCluster, nCluster))\n # TODO: This could probably be more efficient (e.g. 
using np.outer)\n for freq in range(nFreq):\n for ic in range(nCluster):\n for jc in range(0, ic+1):\n mat[ic,jc] = numpy.sum( ninvs[freq] \\\n * clumaps[1][ic][freq] * clumaps[1][jc][freq] )\n if ic != jc: mat[jc,ic] = mat[ic,jc]\n d1 += numpy.dot(mat, d0)\n d1 += numpy.dot(ivcov, d0) # Add prior term\n return d1\n \"\"\"\n \n def apply_cmb_foreground_block(dc, dm, dt, dk=None):\n \"\"\"\n Apply the CMB x (Monopole + TSZ + KSZ) terms in one block:\n (A^T N^-1 A T) x_mono\n (A^T N^-1 A F) x_tsz\n (A^T N^-1 A K) x_ksz\n \n (T^T A^T N^-1 A) x_cmb\n (F^T A^T N^-1 A) x_cmb\n (K^T A^T N^-1 A) x_cmb\n \"\"\"\n ksz = False\n if dk is not None: ksz = True\n \n # (A^T N^-1 A T) x_mono; (A^T N^-1 A F) x_tsz; (A^T N^-1 A K) x_ksz\n b_lt = 0; b_lk = 0; b_lm = 0\n for f in range(nFreq):\n mct = 0; mck = 0\n for ic in range(nCluster):\n mct += dt[ic] * ninvs[f] * clumaps[0][ic][f] * g_nu[f]\n if ksz: mck += dk[ic] * ninvs[f] * clumaps[1][ic][f]\n \n b_lm += fft.rfft(dm * ninvs[f],axes=[-2,-1]) * beam_prec[f]\n b_lt += fft.rfft(mct,axes=[-2,-1]) * beam_prec[f]\n if ksz: b_lk += fft.rfft(mck,axes=[-2,-1]) * beam_prec[f]\n\n mcm = fft.irfft(b_lm,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n mct = fft.irfft(b_lt,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n if ksz: mck = fft.irfft(b_lk,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n \n # (T^T A^T N^-1 A) x_cmb; (F^T A^T N^-1 A) x_cmb; (K^T A^T N^-1 A) x_cmb\n mc = dc.copy().reshape((nx,ny))\n a_l = fft.rfft(mc,axes=[-2,-1])\n mtc = numpy.zeros(nCluster)\n mkc = numpy.zeros(nCluster)\n mmc = 0\n for f in range(nFreq):\n b_l = a_l * beam_prec[f]\n mc = fft.irfft(b_l,axes=[-2,-1],normalize=True)\n mmc += numpy.sum(mc * ninvs[f])\n for ic in range(nCluster):\n mtc[ic] += numpy.sum(mc * ninvs[f] * clumaps[0][ic][f] * g_nu[f])\n if ksz: mkc[ic] += numpy.sum(mc * ninvs[f] * clumaps[1][ic][f])\n \n if ksz: return mct, mcm, mck, mtc, mmc, mkc\n return mct, mcm, mtc, mmc\n \n \n def apply_foreground_block(m0, t0, k0=None):\n \"\"\"\n Apply the TSZ + KSZ + Monopole terms in one block:\n [ (T^T A^T N^-1 A F) (T^T A^T N^-1 A K) (T^T A^T N^-1 A T) ] (x_mono)\n [ (F^T A^T N^-1 A F) (F^T A^T N^-1 A K) (F^T A^T N^-1 A T) ] (x_tsz)\n [ (K^T A^T N^-1 A F) (K^T A^T N^-1 A K) (K^T A^T N^-1 A T) ] (x_ksz)\n \"\"\"\n ksz = True if k0 is not None else False\n \n dtt, dkk, dtk, dkt = [numpy.zeros(nCluster) for i in range(4)]\n mtt, mkk, mtk, mkt = [numpy.zeros((nCluster, nCluster)) for i in range(4)]\n dmm, dmk, dmt = [0 for i in range(3)]\n dkm, dtm = [numpy.zeros(nCluster) for i in range(2)]\n \n # TODO: This could probably be more efficient (e.g. using np.outer)\n for f in range(nFreq):\n dmm += numpy.sum(ninvs[f]) * m0\n \n # Loop through clusters\n for ic in range(nCluster):\n dmt += numpy.sum( ninvs[f] * g_nu[f] * clumaps[0][ic][f] * t0[ic] )\n dtm[ic] += numpy.sum( ninvs[f] * g_nu[f] * clumaps[0][ic][f] * m0 )\n if ksz:\n dmk += numpy.sum( ninvs[f] * clumaps[1][ic][f] * k0[ic] )\n dkm[ic] += numpy.sum( ninvs[f] * clumaps[1][ic][f] * m0 )\n \n for jc in range(0, ic+1):\n mtt[ic,jc] = numpy.sum( ninvs[f] * g_nu[f]**2. 
\\\n * clumaps[0][ic][f] * clumaps[0][jc][f] )\n if ksz:\n mkk[ic,jc] = numpy.sum( ninvs[f] \\\n * clumaps[1][ic][f] * clumaps[1][jc][f] )\n mtk[ic,jc] = numpy.sum( ninvs[f] * g_nu[f] \\\n * clumaps[0][ic][f] * clumaps[1][jc][f] )\n mkt[ic,jc] = numpy.sum( ninvs[f] * g_nu[f] \\\n * clumaps[1][ic][f] * clumaps[0][jc][f] )\n # Mirror indices\n mtt[jc,ic] = mtt[ic,jc]\n if ksz:\n mkk[jc,ic] = mkk[ic,jc]\n mtk[jc,ic] = mtk[ic,jc]\n mkt[jc,ic] = mkt[ic,jc]\n \n # Add total contribs. for this band\n dtt += numpy.dot(mtt, t0)\n if ksz:\n dkk += numpy.dot(mkk, k0)\n dtk += numpy.dot(mtk, k0)\n dkt += numpy.dot(mkt, t0)\n \n if ksz: return dtt, dkk, dmm, dtk, dkt, dmk, dkm, dtm, dmt\n return dtt, dmm, dtm, dmt\n \n # Apply block matrix multiplications and return\n # FIXME: What if KSZ not used?\n x0 = my_map[:nx*ny]\n x1 = my_map[nx*ny:nx*ny+1]\n x2 = my_map[nx*ny+1:nx*ny+nCluster+1]\n if ksz: x3 = my_map[nx*ny+nCluster+1:nx*ny+2*nCluster+1]\n \n # Multiply input vector in blocks\n #t=time.time()\n dcc = apply_cmb_cmb(x0)\n #print 'CMB', time.time()-t\n if ksz:\n dct, dcm, dck, dtc, dmc, dkc = apply_cmb_foreground_block(x0, x1, x2, x3)\n dtt, dkk, dmm, dtk, dkt, dmk, dkm, dtm, dmt = apply_foreground_block(x1, x2, x3)\n x_new_0 = dcc + dct + dck + dcm\n x_new_1= dmc + dmt + dmk + dmm\n x_new_2 = dtc + dtt + dtk + dtm\n x_new_3 = dkc + dkt + dkk + dkm\n x_new = numpy.concatenate((x_new_0, x_new_1, x_new_2, x_new_3))\n else:\n #t=time.time()\n dct, dcm, dtc, dmc = apply_cmb_foreground_block(x0, x1, x2)\n #print 'CMB-F', time.time()-t\n \n #t=time.time()\n dtt, dmm, dtm, dmt = apply_foreground_block(x1, x2)\n #print 'F', time.time()-t\n \n x_new_0 = dcc + dct + dcm\n x_new_1 = dmc + dmt + dmm\n x_new_2 = dtc + dtt + dtm\n x_new = numpy.concatenate((x_new_0, x_new_1, x_new_2))\n\n\n#sys.exit()\n return x_new", "def cluster(self, bufr, elevmap, xpos, zpos, w, d, count, options, minscl, maxscl):\r\n #create a cluster of shapes on an elevation map\r\n blist = []\r\n for v in range(count):\r\n x = xpos + random.random() * w - w * 0.5\r\n z = zpos + random.random() * d - d * 0.5\r\n rh = random.random() * (maxscl - minscl) + minscl\r\n rt = random.random() * 360.0\r\n y = elevmap.calcHeight(x, z) + rh * 2\r\n blist.append([bufr, x, y, z, 0.0, rt, 0.0, rh, rh, rh])\r\n\r\n #self.merge(bufr, x, y, z, 0.0, rt, 0.0, rh, rh, rh)\r\n self.merge(blist)", "def genLattice(structure,in_network,dim,supercell,prec=1E-4,\n seed_index=0,c_mag=60,y_dist=-1):\n\n # Generate vectors in plane/line, relative to\n # the first atom in the network of atoms\n \n if y_dist==-1:\n y_dist=c_mag/3\n \n new = [x for x in in_network if abs(x[2])<np.pi/2]\n return_structure=False\n mat = np.array(structure.lattice.as_dict()['matrix'])\n coords = np.array([np.dot(mat.T,x.frac_coords%1) for x in structure.sites])\n specs = structure.species\n ref_ele_d = getUniqueCount(specs)\n for i in ref_ele_d:\n ref_ele_d[i]/=(supercell**dim)\n coords = coords-coords[seed_index]\n \n\n\n\n\n for lat_vectors in sorted(new,key=itemgetter(3)):\n\n # Create lattice matrix to fit atomic coordinates against\n # In 2D\n if dim==2:\n new_c = np.cross(lat_vectors[0],lat_vectors[1])\n scale_c = c_mag/magni(new_c)\n\n latt_attempt = np.array([lat_vectors[0],lat_vectors[1],\\\n new_c*scale_c])\n \n # In 1D\n elif dim==1:\n unitV = lat_vectors[0]/magni(lat_vectors[0])\n if unitV[0]==0:\n perp1 = [1,0,0]\n elif unitV[1]==0:\n perp1 = [0,1,0]\n elif unitV[2]==0:\n perp1 = [0,0,1]\n else:\n perp1 = [1,1,-1*(unitV[0]+unitV[1])/unitV[2]]\n perp1 = 
perp1/np.linalg.norm(perp1)*c_mag\n perp2 = np.cross(unitV,perp1)\n perp2 = perp2/np.linalg.norm(perp2)*c_mag\n latt_attempt = np.array([lat_vectors[0],perp1,perp2])\n \n # Fit atomic sites to new lattice\n temp_fracs = np.linalg.solve(latt_attempt.T,np.array(coords).T)\n \n \n\n # Make list of all fractional positions, ignoring\n # which axis\n new_fracs = list([list(x) for x in temp_fracs.T])\n\n if len([x for x in np.array(new_fracs).T if \n np.all([(y>=0 and y<1) for y in np.around(x[:dim],3)]) and\n np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) for \n y in np.around(x[dim:],3)])])==len(new_fracs[0])/supercell**dim:\n \n fit_fracs=[]\n new_fracs_t = np.around(new_fracs.T,6)\n for i in range(len(new_fracs[0])):\n if np.all([(y>=0 and y<1) for y in np.around(new_fracs_t[i][:dim],3)]) \\\n and np.all([(y>=-y_dist/c_mag and y<y_dist/c_mag) \n for y in np.around(new_fracs_t[i][dim:],3)]):\n fit_fracs.append([new_fracs_t[i],specs[i]])\n fit_fracs = np.array(fit_fracs).T\n new_ele_d = getUniqueCount(fit_fracs[1])\n unequal=False\n for k in new_ele_d:\n if new_ele_d[k]!=ref_ele_d[k]:\n unequal=True\n\n break\n if not unequal:\n\n return_structure=True\n break\n\n\n\n # If match found\n if return_structure:\n return(np.array(latt_attempt),fit_fracs)\n # If no match found\n else:\n return([],[])", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n return k+self.offset\n return None", "def neuronUtilization(neuron_cords, bmu_batch, name=\"\"):\n with tf.variable_scope('neuronUtilization' + str(name)):\n # Compute the cords for each bmu.\n # Input Tensor Shape bmu_batch: [batch_size, num_maps]\n # Input Tensor Shape neuron_cords: [num_som_neurons, 2]\n # Output Tensor Shape: [batch_size, num_maps, 2] - the cords for each bmu.\n bmu_cords_per_map_batch = tf.gather(neuron_cords, bmu_batch)\n\n # Input Tensor Shape: [batch_size, num_maps, 2]\n # Output Tensor Shape: [batch_size, num_maps, 1, 2]\n bmu_cords_per_map_batch = tf.expand_dims(bmu_cords_per_map_batch, axis=2)\n\n # Input Tensor Shape: [num_som_neurons, 2]\n # Output Tensor Shape: [1, 1, num_som_neurons 2]\n neuron_cords = tf.expand_dims(tf.expand_dims(neuron_cords, axis=0), axis=0)\n\n # Compare the coords from the bmu_batch with the cords from the map and set similar cords to true.\n # Input Tensor Shape: [batch_size, num_maps, 1, 2]\n # Input Tensor Shape: [1, 1, num_som_neurons 2]\n # Output Tensor Shape: [batch_size, num_maps, num_som_neurons, 2]\n comp = tf.equal(bmu_cords_per_map_batch, neuron_cords)\n\n # Reduce the min value of the 2D cord to get True values everywhere the 2D cord matched with the BMU\n # Then reduce the whole batch to get a map with True on every position a BMU was found.\n # Input Tensor Shape: [batch_size, num_maps, num_som_neurons, 2]\n # Output Tensor Shape: [num_maps, num_som_neurons]\n unique = tf.reduce_max(tf.reduce_min(tf.to_int32(comp), axis=3), axis=0)\n\n # Count the values set to true.\n # Input Tensor Shape: [num_maps, num_som_neurons]\n # Output Tensor Shape: [1]\n non_zeros = tf.math.count_nonzero(unique)\n\n # Calculate the number of neurons and div the number of 
used neurons trough all neurons in the layer.\n # Input Tensor Shape: [1]\n # Output Tensor Shape: [1]\n num_maps, nr_neurons = unique.get_shape()\n neuron_utilization = tf.truediv(tf.to_float(non_zeros), tf.to_float(num_maps.value*nr_neurons.value))\n\n # Output Tensor Shape: [1]\n return neuron_utilization", "def cent_man_dist(position):\n # Parse out the rank and file coordinates of the position\n f, r = position\n # Return the answer by looking up from the table\n row = 8-r\n column = f-1\n return CMD[row][column]", "def get_neighbourhood(indices, map_shape):\n if isinstance(map_shape, int):\n nx = 1\n size = map_shape\n elif len(map_shape) == 2:\n nx = map_shape[1]\n size = map_shape[0] * map_shape[1]\n else:\n print(\"Check your `map_shape` value.\")\n return\n extended = list(indices)\n for s in extended:\n susjedi = np.unique(\n np.array([s-2*nx,\n s-nx-1, s-nx, s-nx+1,\n s-2, s-1, s, s+1, s+2,\n s+nx-1, s+nx, s+nx+1,\n s+2*nx]))\n susjedi_cor = susjedi[(susjedi >= 0) & (susjedi < size)]\n extended = extended + list(susjedi_cor)\n return np.sort(np.unique(extended))", "def calculate_dist_from_eqm(distance_from_eqm, variable_mask):", "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def magnetic_dynamical_matrix_kspace_unitcell(k, mglat):\n # First use mglat to get (angs, num_neis, bls, tvals, ons, aol=0.8)\n #\n # angs : list\n # each row represents a site in the lattice. Each entry in the row represents the angles to that site's\n # neighbors\n # num_nei : list or array (num_sites x num_sites)\n # Tells how many neighbors of on each kind of sublattice. For example a honeycomb lattice would be\n # num_nei = [[0,3], [3,0]] because each point has 3 neighbors of the other lattice type.\n # bls : len(angs) x 1 float array or int\n # dimension equal to dimension of angs. default value is -1 indicating that all bond lengths are 1\n # tvals : len(angs) x 1 float array or int\n # dimension equal to number of different kinds of springs in unit cell x 1. represents omega_k\n # ons : array (dimension = num_sites pre unit cell)\n # represents omega_g\n if mglat.unit_cell is None:\n unitcell = mglat.get_unitcell()\n else:\n unitcell = mglat.unit_cell\n if unitcell is None:\n raise RuntimeError('Network has no stored unit cell')\n\n angs = unitcell['angs']\n num_nei = unitcell['num_nei']\n bls = unitcell['bls']\n tvals = unitcell['tvals']\n ons = unitcell['ons']\n\n num_sites = len(angs)\n Ok = tvals[0][0]\n\n tvals = list(np.ones_like(tvals))\n if aol > 0:\n a = aol ** 2.\n Op = (1. + a / 6. - (1. / 4 + a / 12.)) * Ok\n Om = (1. + a / 6. + (1. / 4 + a / 12.)) * Ok\n else:\n Op = 1.\n Om = 1.\n\n M = np.zeros([2 * num_sites, 2 * num_sites], dtype='complex')\n\n if bls == -1:\n bls = np.ones_like(angs)\n if tvals == -1:\n tvals = np.ones_like(angs)\n if ons == 1:\n ons = np.ones(num_sites)\n\n for i in range(len(M)):\n index = i % num_sites\n angs_for_row = angs[index]\n bls_for_row = bls[index]\n num_neis_row = num_neis[index]\n num_bonds = len(angs[index])\n\n tv = tvals[index]\n num_bonds = sum(tv)\n\n ff = 0\n tt = tv[2] * (1. + 2. * np.sin(20 * np.pi / 180.))\n\n fill_count = 0\n s_fill_count = 0\n for j in range(len(M)):\n if i == j:\n if i < num_sites:\n M[i, j] = Op * num_bonds + 2 * (ons[index] - (3. / 8) * a * Ok)\n else:\n M[i, j] = - Om * num_bonds - 2 * (ons[index] - (3. 
/ 8) * a * Ok)\n else:\n ii = j % num_sites\n num_nei = num_neis_row[ii]\n # print 'num nei', num_nei\n\n if i < num_sites and j < num_sites:\n for l in range(num_nei):\n M[i, j] += - Op * tv[fill_count] * \\\n np.exp(1j * np.dot(k, vec(angs_for_row[fill_count], bls_for_row[fill_count])))\n fill_count += 1\n elif i >= num_sites and j >= num_sites:\n for l in range(num_nei):\n M[i, j] += Om * tv[fill_count] * \\\n np.exp(1j * np.dot(k, vec(angs_for_row[fill_count], bls_for_row[fill_count])))\n fill_count += 1\n elif i < num_sites and j >= num_sites:\n if j == num_sites + i:\n M[i, j] = Om * np.sum([tv[u] * ang_fac(angs_for_row[u]) for u in range(len(angs_for_row))])\n else:\n for l in range(num_nei):\n vv = vec(angs_for_row[s_fill_count], bls_for_row[s_fill_count])\n M[i, j] += - Om * tv[s_fill_count] * ang_fac(angs_for_row[s_fill_count]) * \\\n np.exp(1j * np.dot(k, vv))\n s_fill_count += 1\n elif i >= num_sites and j < num_sites:\n if j == (num_sites + i) % num_sites:\n M[i, j] = - Op * np.sum([tv[u] * ang_fac(-angs_for_row[u]) for u in range(len(angs_for_row))])\n else:\n for l in range(num_nei):\n vv = vec(angs_for_row[s_fill_count], bls_for_row[s_fill_count])\n M[i, j] += Om * tv[s_fill_count] * ang_fac(-angs_for_row[s_fill_count]) * \\\n np.exp(1j * np.dot(k, vv))\n s_fill_count += 1\n\n return -0.5 * M", "def h(pos,obj):\n return D(pos)*(distancia_nodos(pos,obj))", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers", "def _get_node_distance_matrix(\n self, datapoint: np.ndarray, som_array: np.ndarray\n ) -> np.ndarray:\n # algorithms on the full matrix\n if self.distance_metric == \"euclidean\":\n return np.linalg.norm(som_array - datapoint, axis=2)\n\n # node-by-node algorithms\n distmat = np.zeros((self.n_rows, self.n_columns))\n if self.distance_metric == \"manhattan\":\n for node in self.node_list_:\n distmat[node] = dist.cityblock(\n som_array[node[0], node[1]], datapoint\n )\n\n elif self.distance_metric == \"mahalanobis\":\n for node in self.node_list_:\n som_node = som_array[node[0], node[1]]\n cov = np.cov(\n np.stack((datapoint, som_node), axis=0), rowvar=False\n )\n cov_pinv = np.linalg.pinv(cov) # 
pseudo-inverse\n distmat[node] = dist.mahalanobis(datapoint, som_node, cov_pinv)\n\n elif self.distance_metric == \"tanimoto\":\n # Note that this is a binary distance measure.\n # Therefore, the vectors have to be converted.\n # Source: Melssen 2006, Supervised Kohonen networks for\n # classification problems\n # VERY SLOW ALGORITHM!!!\n threshold = 0.5\n for node in self.node_list_:\n som_node = som_array[node[0], node[1]]\n distmat[node] = dist.rogerstanimoto(\n binarize(\n datapoint.reshape(1, -1),\n threshold=threshold,\n copy=True,\n ).ravel(),\n binarize(\n som_node.reshape(1, -1), threshold=threshold, copy=True\n ).ravel(),\n )\n\n elif self.distance_metric == \"spectralangle\":\n for node in self.node_list_:\n distmat[node] = np.arccos(\n np.divide(\n np.dot(som_array[node[0], node[1]], datapoint),\n np.multiply(\n # TODO check if an axis needs to be set here\n np.linalg.norm(som_array),\n np.linalg.norm(datapoint),\n ),\n )\n )\n\n return distmat", "def __init__(self,hdu_list,i_bin,fsk,mask_binary,masked_fraction,contaminants=None) :\n #Read numbers map\n self.fsk,nmap=read_flat_map(None,hdu=hdu_list[2*i_bin])\n compare_infos(fsk,self.fsk)\n\n #Read N(z)\n self.nz_data=hdu_list[2*i_bin+1].data.copy()\n\n #Make sure other maps are compatible\n if not self.fsk.is_map_compatible(mask_binary) :\n raise ValueError(\"Mask size is incompatible\")\n if not self.fsk.is_map_compatible(masked_fraction) :\n raise ValueError(\"Mask size is incompatible\")\n if contaminants is not None :\n for ic,c in enumerate(contaminants) :\n if not self.fsk.is_map_compatible(c) :\n raise ValueError(\"%d-th contaminant template is incompatible\"%ic)\n \n #Translate into delta map\n self.masked_fraction=masked_fraction\n self.weight=masked_fraction*mask_binary\n goodpix=np.where(mask_binary>0.1)[0]\n self.goodpix=goodpix\n self.mask_binary=mask_binary\n self.Ngal = np.sum(nmap*mask_binary)\n ndens=np.sum(nmap*mask_binary)/np.sum(self.weight)\n self.ndens_perad=ndens/(np.radians(self.fsk.dx)*np.radians(self.fsk.dy))\n self.delta=np.zeros_like(self.weight)\n self.delta[goodpix]=nmap[goodpix]/(ndens*masked_fraction[goodpix])-1\n\n #Reshape contaminants\n conts=None\n if contaminants is not None :\n conts=[[c.reshape([self.fsk.ny,self.fsk.nx])] for c in contaminants]\n\n #Form NaMaster field\n self.field=nmt.NmtFieldFlat(np.radians(self.fsk.lx),np.radians(self.fsk.ly),\n self.weight.reshape([self.fsk.ny,self.fsk.nx]),\n [self.delta.reshape([self.fsk.ny,self.fsk.nx])],\n templates=conts)", "def build_magmom(self,list_oxidizable_site_indices):\n\n MAGMOM = []\n # tabulate how many sites must be reduced from every species in the variable_magnetization_dict.\n reduction_counter = {}\n for key in self.variable_magnetization_dict:\n reduction_counter[key] = self.variable_magnetization_dict[key]['n_reduced']\n\n dict_reduction = {}\n #reduce according to proximity\n for i_s in list_oxidizable_site_indices:\n symbol = self.structure.sites[i_s].specie.symbol\n \n if reduction_counter[symbol] > 0:\n dict_reduction[i_s] = self.variable_magnetization_dict[symbol]['m_reduced']\n reduction_counter[symbol] -= 1\n elif reduction_counter[symbol] == 0:\n dict_reduction[i_s] = self.variable_magnetization_dict[symbol]['m_oxidized']\n\n else:\n print(\"SOMETHING IS WRONG. 
REVIEW CODE!\")\n sys.exit()\n\n for i_s, site in enumerate(self.structure):\n if i_s in dict_reduction:\n # add a bit of randomness to not get trapped in metastable solution.\n # It is quite useless to have a random number with 16 decimals, and it \n # makes the INCAR ugly; let's round.\n random_addition = np.round( 0.2*np.random.random(1)[0]-0.1, 6)\n MAGMOM.append(dict_reduction[i_s]+random_addition)\n else:\n MAGMOM.append(0.6)\n\n return MAGMOM", "def cluster_spatial_positioning(data):\n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n if n_clusters <2:\n #Setting cluster angluar features to default\n cdist=[Cluster_Relative_Distances()]\n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n elif n_clusters >=2:\n # Here we implement two approaches for measuring distances between clustes:\n # (1) border-boder distances and (2) centroid-centroid distances. \n # We compute dispersion measures for the distances obtained. \n \n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n\n min_dist_between_clusters=np.row_stack([[np.amin(ss.distance_matrix(np.column_stack([d[i]['X'].array,d[i]['Y'].array]), \n np.column_stack([d[j]['X'].array,d[j]['Y'].array]))) for j in d.keys()] for i in d.keys()])\n min_dist_between_clusters=np.delete(list(set(np.frombuffer(min_dist_between_clusters))) ,0)\n\n cen_dist_between_clusters=ss.distance_matrix(np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]),\n np.row_stack([(np.mean(d[i]['X'].array),np.mean(d[i]['Y'].array)) for i in d.keys()]))\n cen_dist_between_clusters=np.delete(list(set(np.frombuffer(cen_dist_between_clusters))) ,0)\n\n (avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster)= distribution_statistics(min_dist_between_clusters)\n\n (avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster)= distribution_statistics(cen_dist_between_clusters)\n\n cdist = [Cluster_Relative_Distances([avg_bor_bor_dist_cluster,min_bor_bor_dist_cluster,max_bor_bor_dist_cluster,\n std_bor_bor_dist_cluster,CV_bor_bor_dist_cluster,CD_bor_bor_dist_cluster,\n IQR_bor_bor_dist_cluster,Quartile_CD_bor_bor_dist_cluster,\n avg_cen_cen_dist_cluster,min_cen_cen_dist_cluster,max_cen_cen_dist_cluster,\n std_cen_cen_dist_cluster,CV_cen_cen_dist_cluster,CD_cen_cen_dist_cluster,\n IQR_cen_cen_dist_cluster,Quartile_CD_cen_cen_dist_cluster])]\n \n cdist = pd.DataFrame([o.__dict__ for o in cdist])\n\n \n return cdist", "def calculate_bmu(self, input):\n result = 0\n\n if len(input) > self.som.input_count:\n raise Exception(\n \"Can't train SOM with input size of {} with input data of count {}.\".format(self.som.input_count,\n len(input)))\n\n # Track the lowest distance so far.\n lowest_distance = float(\"inf\")\n\n for i in range(self.som.output_count):\n distance = self.calculate_euclidean_distance(self.som.weights, input, i)\n\n # Track the lowest distance, this is the BMU.\n if distance < lowest_distance:\n lowest_distance = distance\n result = i\n\n # Track the worst distance, this is the error for the entire network.\n if lowest_distance > self.worst_distance:\n self.worst_distance = lowest_distance\n\n return result", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if 
len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def build_magmom(self,list_oxidizable_site_indices,number_of_electrons):\n\n MAGMOM = []\n ne = number_of_electrons\n\n dict_oxidizable = {}\n\n for i_s in list_oxidizable_site_indices:\n symbol = self.structure.sites[i_s].specie.symbol\n if ne > 0:\n dict_oxidizable[i_s] = self.variable_magnetization_dict[symbol][1]\n ne -= 1\n elif ne == 0:\n dict_oxidizable[i_s] = self.variable_magnetization_dict[symbol][0]\n\n else:\n print(\"SOMETHING IS WRONG. 
REVIEW CODE!\")\n sys.exit()\n\n for i_s, site in enumerate(self.structure):\n if i_s in dict_oxidizable:\n # add a bit of randomness to not get trapped in metastable solution.\n # It is quite useless to have a random number with 16 decimals, and it \n # makes the INCAR ugly; let's round.\n random_addition = np.round( 0.2*np.random.random(1)[0]-0.1, 6)\n MAGMOM.append(dict_oxidizable[i_s]+random_addition)\n else:\n MAGMOM.append(0.6)\n\n return MAGMOM", "def __init__(self, x, y, input_len, sigma=1.0, learning_rate=0.5,\n decay_function=asymptotic_decay,\n neighborhood_function='gaussian', topology='rectangular',\n activation_distance='euclidean', random_seed=None):\n if sigma >= x or sigma >= y:\n warn('Warning: sigma is too high for the dimension of the map.')\n\n self._random_generator = random.RandomState(random_seed)\n\n self._learning_rate = learning_rate\n self._sigma = sigma\n self._input_len = input_len\n # random initialization\n self._weights = self._random_generator.rand(x, y, input_len)*2-1\n self._weights /= linalg.norm(self._weights, axis=-1, keepdims=True)\n\n self._activation_map = zeros((x, y))\n self._neigx = arange(x)\n self._neigy = arange(y) # used to evaluate the neighborhood function\n\n if topology not in ['hexagonal', 'rectangular']:\n msg = '%s not supported only hexagonal and rectangular available'\n raise ValueError(msg % topology)\n self.topology = topology\n self._xx, self._yy = meshgrid(self._neigx, self._neigy)\n self._xx = self._xx.astype(float)\n self._yy = self._yy.astype(float)\n if topology == 'hexagonal':\n self._xx[::-2] -= 0.5\n if neighborhood_function in ['triangle']:\n warn('triangle neighborhood function does not ' +\n 'take in account hexagonal topology')\n\n self._decay_function = decay_function\n\n neig_functions = {'gaussian': self._gaussian,\n 'mexican_hat': self._mexican_hat,\n 'bubble': self._bubble,\n 'triangle': self._triangle}\n\n if neighborhood_function not in neig_functions:\n msg = '%s not supported. Functions available: %s'\n raise ValueError(msg % (neighborhood_function,\n ', '.join(neig_functions.keys())))\n\n if neighborhood_function in ['triangle',\n 'bubble'] and (divmod(sigma, 1)[1] != 0\n or sigma < 1):\n warn('sigma should be an integer >=1 when triangle or bubble' +\n 'are used as neighborhood function')\n\n self.neighborhood = neig_functions[neighborhood_function]\n\n distance_functions = {'euclidean': self._euclidean_distance,\n 'cosine': self._cosine_distance,\n 'manhattan': self._manhattan_distance,\n 'chebyshev': self._chebyshev_distance}\n\n if isinstance(activation_distance, str):\n if activation_distance not in distance_functions:\n msg = '%s not supported. 
Distances available: %s'\n raise ValueError(msg % (activation_distance,\n ', '.join(distance_functions.keys())))\n\n self._activation_distance = distance_functions[activation_distance]\n elif callable(activation_distance):\n self._activation_distance = activation_distance", "def mesh_uniform(N_e, d, Omega):", "def _neuron_location(self, m, n):\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def igb_mapped_norm(igbfile1, igbfile2, mapfile):\n \n [Vm1, hd1] = rigb.read_igb_slice(igbfile1)\n [Vm2, hd2] = rigb.read_igb_slice(igbfile2) \n \n vm1 = Vm1.squeeze(); vm2 = Vm2.squeeze()\n \n shp1 = shape(vm1)\n shp2 = shape(vm2)\n xdim1 = shp1[1]; time1 = shp1[0];\n xdim2 = shp2[1]; time2 = shp2[0]\n \n print \"\"\n\n if mapfile is not None:\n # match nodes in hybrid and tetrahedra mesh\n maparray = read_array_pts(mapfile)\n size = len(maparray[:,0]) \n temp = zeros((time1,size))\n \n for t in xrange(time1):\n #count = 0\n for i in xrange(len(maparray[:,0])):\n ind1 = int(maparray[i][0])\n ind2 = int(maparray[i][1])\n temp[t][i] = vm1[t][ind1] - vm2[t][ind2]\n #count = count + 1\n else: \n # compute the difference\n temp = zeros((time1,xdim1))\n for t in xrange(time1):\n count = 0\n temp[t][:] = vm1[t][:] - vm2[t][:]\n \n erro = zeros(time1)\n \n print '============ P O I N T W I S E C O M P A R I S O N ==========\\n'\n print 'At time t=%d ms' % (20) \n print ' Maximum norm at time=20ms : ' , max( temp[20,:] )\n print ' L2 norm at time=20ms : ' , linalg.norm( temp[20,:] )\n print '\\nAt time t=%d ms' % (100) \n print ' Maximum norm at time=100ms: ' , max( temp[100,:] )\n print ' L2 norm at time=100ms : ' , linalg.norm( temp[100,:] )\n print '\\nAt time t=%d ms' % (120) \n print ' Maximum norm at time=120ms: ' , max( temp[120,:] )\n print ' L2 norm at time=120ms : ' , linalg.norm( temp[120,:] )\n \n n2chk = 450\n print '\\nDifference in arrival time (n=450): ' , chk_dif_in_max_atime(vm1[:,n2chk],vm2[:,n2chk]) \n \n for t in xrange(time1):\n #erro[t] = linalg.norm(temp[t,:])\n erro[t] = max(temp[t,:])\n \n #ax = subplot(111)\n #mytime = arange(0,151)\n #ax.plot(mytime, vm1[:,n2chk], mytime, vm2[:,n2chk])\n #ax.legend(('hyb','tet'), 'upper right')\n #show()\n\n \n ### new stuff ###\n print '\\n============= E R R O R C O M P A R I S O N =============\\n'\n\n # interpolate solution\n\n # compute error as a column vector\n t = 10\n e = temp[t][:]\n\n # read and store CARP lumped mass matrix\n massfile1 = '/data/sim/simulacao_1/hyb_75um/output/MatLabDump_Mi.bin'\n M = petsc_binary_read (massfile1,0)\n size = np.size(M)\n data = M\n rows = np.arange(0,size,1)\n cols = np.arange(0,size,1)\n A = sparse.coo_matrix( (data,(rows,cols)) ,(size, size) )\n \n #aux = dot(e,A*e)\n print '\\nAt time t=%d ms' % (t)\n print ' Mean-square-root L2 Norm (normal) : ', compute_L2_error(e,A)\n print ' Mean-square-root L2 Norm (linalg.norm):' , linalg.norm(e)\n\n print '\\n'\n pdb.set_trace()", "def compute_s(\n traj,\n surface_normal_dim=2,\n pore_center = 0.0,\n max_distance = 1.0,\n bin_width=0.01\n ):\n # Make molecules whole first\n\n #remove the below task as molecules are not split\n #traj.make_molecules_whole(inplace=True)\n\n # Select ow and hw\n water_o = traj.top.select(\"name O1\")\n water_h = traj.top.select(\"name H1 H2\")\n traj_ow = traj.atom_slice(water_o)\n traj_hw = traj.atom_slice(water_h)\n\n # Compute angles between surface normal ([0,0,1]) and h-o-h bisector\n hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames,-1,2,3).mean(axis=2)\n vectors = (traj_ow.xyz - hw_midpoints)\n vectors 
/= np.linalg.norm(vectors, axis=-1, keepdims=True)\n cos_angles = vectors[:,:,surface_normal_dim]\n\n # Compute distances -- center of pore already @ 0,0; use OW position\n distances = traj_ow.xyz[:,:,surface_normal_dim] - pore_center\n bin_centers = []\n s_values = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < bin_center + 0.5 * bin_width\n )\n s = (3.0 * np.mean(cos_angles[mask]**2) - 1.0) / 2.0\n bin_centers.append(bin_center)\n s_values.append(s)\n\n return bin_centers, s_values", "def SOM(args):\n\n # Obtain the normalized set of cities (w/ coord in [0,1])\n cities = pd.read_csv(Path(args.data_dir) / 'data1.csv')\n\n iteration = args.iteration\n learning_rate = args.learning_rate\n decay = args.decay\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir_p()\n\n cities_nm = cities.copy()\n\n cities_nm[['x', 'y']] = normalize(cities_nm[['x', 'y']])\n cities_nm.to_csv(out_dir/'cities_nm.csv')\n cities.to_csv(out_dir/'cities.csv')\n\n\n depot = cities_nm.query('city==0')[['x','y']].to_numpy()\n # The population size is 8 times the number of cities\n #n = cities_cp.shape[0] * 2# a single route's neurons\n n=100\n # Generate an adequate network of neurons:\n #network = generate_network(n)\n neuron_chains =init_neurons(size=n,depot=depot)\n print('--> Network of {} neurons created. Starting the iterations:'.format(n))\n best_routes=np.array([0])\n\n #save\n losses_sum_log={}#每个循环losses_sum值\n min_losses_sum_log = {}##保存最小值的路径losses\n min_losses_log={}#存储最好情况下四条路径的距离值\n min_routes_log={}\n best_id=0\n min_losses_sum=0\n\n for i in tqdm(range(iteration)):\n if not i % args.neuro_plot_freq:\n print('\\t> Iteration {}/{}'.format(i, iteration), end=\"\\r\")\n # Choose a random city\n sample = cities_nm.sample(1)\n if int(sample['city']) in args.depot_idxs:\n continue\n city = sample[['x', 'y']].values#随机抽样 random sampling\n group_idx,winner_idx = select_closest_gpid(neuron_chains, city)\n\n # Generate a filter that applies changes to the winner's gaussian\n gaussian = get_neighborhood(center=winner_idx, radix=n//10, domain=neuron_chains[0].shape[0])\n # Update the network's weights (closer to the city)\n neuron_chains[group_idx] += gaussian[:,np.newaxis] * learning_rate * (city - neuron_chains[group_idx])\n # Decay the variables\n learning_rate = learning_rate * decay\n n = n * decay\n\n\n if i % args.evaluate_freq==0:\n cities_od = rebuild_cities(cities_nm,neuron_chains,args.num_depots)\n cities_od[['x','y']] =cities.reindex(cities_od['city'])[['x','y']]\n losses = routes_distances(cities_od)\n losses_sum = sum(losses)\n losses_sum_log[i] = losses_sum\n\n if min_losses_sum == 0 or min_losses_sum > losses_sum:\n min_losses_sum = losses_sum\n best_id = i\n routes = get_routes(cities_od)\n routes = [list(item.astype(np.float64)) for item in routes]\n min_routes_log[i] = routes\n\n min_losses_sum_log[i] = losses_sum\n min_losses_log[i] = losses\n cities_od.to_csv(out_dir/'data_out_{:04d}.csv'.format(i))\n save_neuron_chains(neuron_chains,out_dir/\"neuron_chains_{:04d}.npy\".format(i))\n\n #end for\n\n # Check if any parameter has completely decayed.\n if n < 1:\n print('Radius has completely decayed, finishing execution',\n 'at {} iterations'.format(i))\n break\n if learning_rate < 0.001:\n print('Learning rate has completely decayed, finishing execution',\n 'at {} iterations'.format(i))\n break\n\n\n print('Completed {} iterations.'.format(iteration))\n\n results = {}\n\n 
results['losses_sum_log']=losses_sum_log\n results['best_id'] = best_id\n\n results['min_losses_sum_log']=min_losses_sum_log\n results['min_losses_log']=min_losses_log\n results['min_routes_log'] = min_routes_log\n\n\n p = Path(out_dir/'results.json')\n with open(p, 'w') as fp:\n json.dump(results, fp)\n print('ok')\n\n\n return results", "def get_smarts(prefix, atom_idxs):\n\n offmol = Molecule.from_file(prefix + '.mol2')\n fix_carboxylate_bond_orders(offmol)\n remove_charge_and_bond_order_from_guanidinium(offmol)\n remove_charge_and_bond_order_from_imidazole(offmol)\n if prefix in prefix2pmd_struct:\n pmd_struct = prefix2pmd_struct[prefix]\n else: \n pmd_struct = ParmEd.load_file(prefix + '.prmtop')\n prefix2pmd_struct[prefix] = pmd_struct\n oemol = offmol.to_openeye()\n residues_of_interest = set()\n atom_indices_of_interest = set()\n for atom_idx in atom_idxs:\n residues_of_interest.add(pmd_struct.atoms[atom_idx].residue.idx)\n for oeatom, pmd_atom in zip(oemol.GetAtoms(), pmd_struct.atoms):\n\n # Delete all non-residue-of-interest atoms\n if (pmd_atom.residue.idx in residues_of_interest):\n atom_indices_of_interest.add(pmd_atom.idx)\n\n # Assign tags to atoms of interest\n if (oeatom.GetIdx() in atom_idxs):\n map_index = atom_idxs.index(oeatom.GetIdx()) + 1\n oeatom.SetMapIdx(map_index)\n\n # Make a \"Subset\" molecule, so that we don't get weird charges\n # around where we cleave the residues\n subsetmol = OEChem.OEGraphMol()\n oepred = OEChem.PyAtomPredicate(lambda x:x.GetIdx() in atom_indices_of_interest)\n OEChem.OESubsetMol(subsetmol, oemol, oepred)\n #OEAssignAromaticFlags(subsetmol, \n smiles_options = (OEChem.OESMILESFlag_Canonical | OEChem.OESMILESFlag_Isotopes |\n OEChem.OESMILESFlag_RGroups)\n # Add the atom and bond stereo flags\n smiles_options |= OEChem.OESMILESFlag_AtomStereo | OEChem.OESMILESFlag_BondStereo\n\n # Add the hydrogen flag\n smiles_options |= OEChem.OESMILESFlag_Hydrogens\n smiles_options |= OEChem.OESMILESFlag_AtomMaps\n smiles = OEChem.OECreateSmiString(subsetmol, smiles_options)\n\n # Replace \"$\" characters (bond order 4) with \"~\" (wildcard bond)\n smiles = smiles.replace('$', '~')\n return smiles", "def get_wm_ws_Gx_bot(self):\n # BASICALLY SETS self.Gm1_bot, self.dGm1_dS_bot, self.Gt1_bot, self.dGt1_dS_bot \n z_u_r = self.grid_dict['z_u_r']\n z_u_w = self.grid_dict['z_u_w']\n [Ly,N] = self.b.shape\n #---> j-loop\n for j in range(Ly): \n self.kbl[j] = N # initialize search\n #-> end j-loop\n\n #--> k-loop\n for k in range(N-1,0,-1):\n k_w = k\n k_r = k-1\n # --> j loop \n for j in range(Ly):\n if z_u_r[j,k_r] - z_u_w[j,0] > self.hbbl[j]:\n self.kbl[j] = k_w\n\n #--> end k\n # --> end j\n\n\n '''\n Compute nondimenisonal shape function coefficeints Gx() by\n matching values and vertical derivatives of interior mixing\n coefficients at hbbl (sigma=1)\n '''\n\n self.Gm1_bot = np.zeros([Ly])\n self.dGm1_dS_bot = np.zeros([Ly])\n self.Gt1_bot = np.zeros([Ly])\n self.dGt1_dS_bot = np.zeros([Ly]) \n self.Av_bl_bot = np.zeros([Ly])\n self.dAv_bl_bot = np.zeros([Ly]) \n self.cff_up_bot = np.zeros([Ly])\n self.cff_dn_bot = np.zeros([Ly])\n\n\n\n\n\n self.wm_bot = np.zeros([Ly])\n self.ws_bot = np.zeros([Ly]) \n\n # CALCULATE ustar for the bottom based on bototm velocities\n \n \n \n # CALCULATE r_D\n self.r_D = TTTW_func.get_r_D(self.u,self.v,self.Zob,self.grid_dict) \n u = self.u\n v_upts = TTTW_func.v2u(self.v)\n \n ubar = np.mean(u,axis=1)\n vbar = np.mean(v_upts,axis=1)\n\n # --> j loop\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy 
effects neglected\n if self.CD_SWITCH:\n # DEPTH AVERAGED APPROACH\n uref = u[j,0]\n vref = v_upts[j,0]\n ustar2 = self.C_D * (uref**2 + vref**2)\n else:\n ustar2 = self.r_D[j] * np.sqrt(u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n\n self.wm_bot[j] = wm\n self.ws_bot[j] = ws\n \n k_w = self.kbl[j] \n z_bl = z_u_w[j,0] + self.hbbl[j]\n\n if z_bl < z_u_w[j,k_w-1]:\n k_w = k_w-1\n\n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * ( self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl_bot[j] = Av_bl\n self.dAv_bl_bot[j] = dAv_bl\n\n\n self.Gm1_bot[j] = Av_bl / (self.hbbl[j] * wm + self.eps)\n self.dGm1_dS_bot[j] = np.min([0,-dAv_bl/(ws+self.eps)])\n\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * ( self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1_bot[j] = At_bl / (self.hbbl[j] * ws + self.eps)\n self.dGt1_dS_bot[j] = np.min([0,-dAt_bl/(ws+self.eps)])", "def soti_block_slab(size, p , q, nu, zu, t = -1, M = 2.3, D1 = 0.8, D2 = 0.5):\n # put unit_blocks into diag\n \n # make blocks array with dims (size,4q,4q)\n blocks = np.zeros((size,4*q,4*q),dtype=complex) \n \n # fill up\n #xs = linspace(0,size,num=size) # for completeness\n for i in range(size):\n #x = xs[i] # doesn't actually do anything\n blocks[i,:,:] = unit_block_slab(p=p,q=q,nu=nu,zu=zu,t=t,M=M,D1=D1,D2=D2)\n \n # put in diagonal\n M_diags = ss.block_diag(blocks)\n \n # off diagonals x -> x+1 & h.c.\n hop_x = 1/2 * (t * pms.s0_tz() + 1j * D1 * pms.sx_tx() + D2 * pms.s0_ty())\n hop_x_dag = hop_x.conj().T\n \n # fill up to identity\n hop_x_mat = np.kron(np.eye(N=size), hop_x)\n hop_x_mat_dag = np.kron(np.eye(N=size), hop_x_dag)\n \n # put these \"identity\" matrices on the off-diagonals\n ### double check the math for this section please\n M_top_diag = np.kron(np.diag(np.ones(size-1), k=1), hop_x_mat)\n M_bot_diag = np.kron(np.diag(np.ones(size-1), k=-1), hop_x_mat_dag)\n \n M_off_diags = M_top_diag + M_bot_diag\n \n MAT = M_diags + M_off_diags\n \n return MAT", "def calc_atoms(self, cell: Cell, atom_site: AtomSiteL,\n space_group_symop: SpaceGroupSymopL, distance_min=0.3):\n\n\n r_11 = numpy.array(space_group_symop.r_11, dtype=int)\n r_12 = numpy.array(space_group_symop.r_12, dtype=int)\n r_13 = numpy.array(space_group_symop.r_13, dtype=int)\n r_21 = numpy.array(space_group_symop.r_21, dtype=int)\n r_22 = numpy.array(space_group_symop.r_22, dtype=int)\n r_23 = numpy.array(space_group_symop.r_23, dtype=int)\n r_31 = numpy.array(space_group_symop.r_31, dtype=int)\n r_32 = numpy.array(space_group_symop.r_32, dtype=int)\n r_33 = numpy.array(space_group_symop.r_33, dtype=int)\n\n b_1 = numpy.array(space_group_symop.b_1, dtype=float)\n b_2 = numpy.array(space_group_symop.b_2, dtype=float)\n b_3 = numpy.array(space_group_symop.b_3, dtype=float)\n\n r_ij = (r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33)\n b_i = (b_1, b_2, b_3)\n\n fract_atom_auc_x = numpy.array(atom_site.fract_x, dtype=float)\n fract_atom_auc_y = numpy.array(atom_site.fract_y, dtype=float)\n fract_atom_auc_z = numpy.array(atom_site.fract_z, dtype=float)\n fract_atom_auc_xyz = (fract_atom_auc_x, fract_atom_auc_y,\n fract_atom_auc_z)\n label_atom_auc = numpy.array(atom_site.label, dtype=str)\n\n fract_atom_uc_x, fract_atom_uc_y, fract_atom_uc_z, label_atom_uc = \\\n calc_atoms_in_unit_cell(r_ij, b_i, fract_atom_auc_xyz,\n 
label_atom_auc)\n\n size_x = self.size_x\n size_y = self.size_y\n\n atom_center = self.atom_center\n atom_site_center = atom_site[atom_center]\n center_fract_x = atom_site_center.fract_x\n center_fract_y = atom_site_center.fract_y\n center_fract_z = atom_site_center.fract_z\n\n center_pos_x, center_pos_y, center_pos_z = \\\n cell.calc_position_by_coordinate(center_fract_x, center_fract_y,\n center_fract_z)\n\n v_pos_x, v_pos_y, v_pos_z = self.calc_axes_x_y_z(cell, atom_site)\n\n pos_atom_uc_x, pos_atom_uc_y, pos_atom_uc_z = \\\n cell.calc_position_by_coordinate(fract_atom_uc_x, fract_atom_uc_y,\n fract_atom_uc_z)\n\n pos_atom_loc_x = v_pos_x[0]*(pos_atom_uc_x - center_pos_x) + \\\n v_pos_x[1]*(pos_atom_uc_y - center_pos_y) + \\\n v_pos_x[2]*(pos_atom_uc_z - center_pos_z)\n pos_atom_loc_y = v_pos_y[0]*(pos_atom_uc_x - center_pos_x) + \\\n v_pos_y[1]*(pos_atom_uc_y - center_pos_y) + \\\n v_pos_y[2]*(pos_atom_uc_z - center_pos_z)\n pos_atom_loc_z = v_pos_z[0]*(pos_atom_uc_x - center_pos_x) + \\\n v_pos_z[1]*(pos_atom_uc_y - center_pos_y) + \\\n v_pos_z[2]*(pos_atom_uc_z - center_pos_z)\n\n flag_x = numpy.abs(pos_atom_loc_x) < 0.5*size_x\n flag_y = numpy.abs(pos_atom_loc_y) < 0.5*size_y\n flag_z = numpy.abs(pos_atom_loc_z) < distance_min\n flag_xyz = numpy.logical_and(flag_x, numpy.logical_and(flag_y, flag_z))\n\n atom_x = pos_atom_loc_x[flag_xyz]\n atom_y = pos_atom_loc_y[flag_xyz]\n atom_label = label_atom_uc[flag_xyz]\n return atom_x, atom_y, atom_label", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def generate_good(self, m, n, rank, mu=2, ka=2):\n sr = random.random()\n s = []\n s.append(sr)\n for r in range(rank-1):\n newele = s[-1] * (1 + ka * random.random() / (rank-1))\n s.append(newele)\n s.reverse()\n \n # best_u = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # A = np.random.rand(m,m)\n # A = scipy.linalg.orth(A)\n # u = A[:, :rank]\n # mu0 = self.compute_mu(u, m, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_u = u\n # print(\"mu0 for u:\", best_mu0)\n # # print(u.T @ u)\n \n # best_v = None\n # best_mu0 = 0\n # while best_mu0 == 0:\n # for _ in range(10):\n # B = np.random.rand(n,n)\n # B = scipy.linalg.orth(B)\n # v = B[:, :rank]\n # mu0 = self.compute_mu(v, n, rank)\n # print(\"mu0 : \", mu0)\n # if mu0 <= mu and mu0 >= best_mu0:\n # best_mu0 = mu0\n # best_v = v\n # print(\"mu0 for v:\", best_mu0)\n # u = best_u\n # v = best_v\n\n for _ in range(100):\n A = np.random.rand(m,m)\n A = scipy.linalg.orth(A)\n u = A[:, :rank]\n mu0 = self.compute_mu(u, m, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for u:\", mu0) \n\n for _ in range(10):\n B = np.random.rand(n,n)\n B = scipy.linalg.orth(B)\n v = B[:, :rank]\n mu0 = self.compute_mu(v, n, rank)\n print(\"mu0 : \", mu0)\n if mu0 <= mu:\n break\n print(\"mu0 for both:\", mu0)\n\n matrix = np.dot(u*s, v.T)\n \n kappa = s[0] / s[-1]\n print(\"kappa=\", kappa)\n \n ss = np.copy(s)\n for k in range(rank):\n ss[k] = s[k] / s[0]\n \n max_entry = np.max(np.abs(np.outer(u[:,:rank], v.T[:rank,:])))\n mu1 = max_entry * math.sqrt(m * n / rank)\n print(\"mu1=\", mu1)\n \n return matrix", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n ind = k+self.offset\n return ind, self.which[k]\n return None, None", "def csr2d_kick_calc(\n z_b,\n x_b,\n weight,\n *,\n gamma=None,\n rho=None,\n nz=100,\n nx=100,\n xlim=None,\n zlim=None,\n reuse_psi_grids=False,\n 
psi_s_grid_old=None,\n psi_x_grid_old=None,\n map_f=map,\n species=\"electron\",\n imethod='map_coordinates',\n debug=False,\n):\n assert species == \"electron\", \"TODO: support species {species}\"\n # assert np.sign(rho) == 1, 'TODO: negative rho'\n\n # Grid setup\n if zlim:\n zmin = zlim[0]\n zmax = zlim[1]\n else:\n zmin = z_b.min()\n zmax = z_b.max()\n\n if xlim:\n xmin = xlim[0]\n xmax = xlim[1]\n else:\n xmin = x_b.min()\n xmax = x_b.max()\n\n dz = (zmax - zmin) / (nz - 1)\n dx = (xmax - xmin) / (nx - 1)\n\n # Charge deposition\n t1 = time.time()\n charge_grid = histogram_cic_2d(z_b, x_b, weight, nz, zmin, zmax, nx, xmin, xmax)\n\n if debug:\n t2 = time.time()\n print(\"Depositing particles takes:\", t2 - t1, \"s\")\n\n # Normalize the grid so its integral is unity\n norm = np.sum(charge_grid) * dz * dx\n lambda_grid = charge_grid / norm\n\n # Apply savgol filter\n lambda_grid_filtered = np.array([savgol_filter(lambda_grid[:, i], 13, 2) for i in np.arange(nx)]).T\n\n # Differentiation in z\n lambda_grid_filtered_prime = central_difference_z(lambda_grid_filtered, nz, nx, dz, order=1)\n\n # Grid axis vectors\n zvec = np.linspace(zmin, zmax, nz)\n xvec = np.linspace(xmin, xmax, nx)\n\n beta = np.sqrt(1 - 1 / gamma ** 2)\n\n t3 = time.time()\n\n if reuse_psi_grids == True:\n psi_s_grid = psi_s_grid_old\n psi_x_grid = psi_x_grid_old\n\n else:\n # Creating the potential grids \n psi_s_grid, psi_x_grid, zvec2, xvec2 = green_meshes(nz, nx, dz, dx, rho=rho, beta=beta) \n \n if debug:\n t4 = time.time()\n print(\"Computing potential grids take:\", t4 - t3, \"s\")\n\n # Compute the wake via 2d convolution\n conv_s, conv_x = fftconvolve2(lambda_grid_filtered_prime, psi_s_grid, psi_x_grid)\n\n if debug:\n t5 = time.time()\n print(\"Convolution takes:\", t5 - t4, \"s\")\n\n Ws_grid = (beta ** 2 / abs(rho)) * (conv_s) * (dz * dx)\n Wx_grid = (beta ** 2 / abs(rho)) * (conv_x) * (dz * dx)\n\n # Calculate the kicks at the particle locations\n \n # Overall factor\n Nb = np.sum(weight) / e_charge\n kick_factor = r_e * Nb / gamma # m\n \n # Interpolate Ws and Wx everywhere within the grid\n if imethod == 'spline':\n # RectBivariateSpline method\n Ws_interp = RectBivariateSpline(zvec, xvec, Ws_grid)\n Wx_interp = RectBivariateSpline(zvec, xvec, Wx_grid)\n delta_kick = kick_factor * Ws_interp.ev(z_b, x_b)\n xp_kick = kick_factor * Wx_interp.ev(z_b, x_b)\n elif imethod == 'map_coordinates':\n # map_coordinates method. Should match above fairly well. 
order=1 is even faster.\n zcoord = (z_b-zmin)/dz\n xcoord = (x_b-xmin)/dx\n delta_kick = kick_factor * map_coordinates(Ws_grid, np.array([zcoord, xcoord]), order=2)\n xp_kick = kick_factor * map_coordinates(Wx_grid, np.array([zcoord, xcoord]), order=2) \n else:\n raise ValueError(f'Unknown interpolation method: {imethod}')\n \n if debug:\n t6 = time.time()\n print(f'Interpolation with {imethod} takes:', t6 - t5, \"s\") \n\n\n result = {\"ddelta_ds\": delta_kick, \"dxp_ds\": xp_kick}\n\n if debug:\n timing = np.array([t2-t1, t4-t3, t5-t4, t6-t5])\n result.update(\n {\n \"zvec\": zvec,\n \"xvec\": xvec,\n \"zvec2\": zvec2,\n \"xvec2\": xvec2,\n \"Ws_grid\": Ws_grid,\n \"Wx_grid\": Wx_grid,\n \"psi_s_grid\": psi_s_grid,\n \"psi_x_grid\": psi_x_grid,\n \"charge_grid\": charge_grid,\n \"lambda_grid_filtered_prime\": lambda_grid_filtered_prime,\n \"timing\": timing\n }\n )\n\n return result", "def mechanisms(self,p):\n print 'loading cell range mechanisms'\n \n # loop over trees\n for tree_key,tree in self.geo.iteritems():\n \n # list to store synapse mechanisms\n self.syns[tree_key] = []\n\n # loop over sections in tree\n for sec_i,sec in enumerate(tree):\n \n # add dimension for each section\n self.syns[tree_key].append([])\n\n # common passive biophysics for all sections\n sec.insert('pas')\n # passive conductance (S/cm2)\n sec.g_pas = 1/p['RmAll'] \n # leak reversal potential (mV) \n sec.e_pas = p['Vrest'] \n # specific capacitance (uf/cm2)\n sec.cm = p['Cm'] \n # axial resistance (ohm cm) \n sec.Ra = p['RaAll'] \n \n # axon active bipophysics\n if tree_key == 'axon':\n # voltage gated sodium\n sec.insert('nax') \n sec.gbar_nax = p['gna']*p['AXONM']\n # print 'axon sodium conductance:', sec.gbar_nax*10000\n # delayed rectifier potassium\n sec.insert('kdr') \n sec.gkdrbar_kdr = p['gkdr']\n # a-type potassium\n sec.insert('kap') \n sec.gkabar_kap = p['KMULTP']\n sec.vhalfl_kap = p['vhalfl_kap']\n sec.vhalfn_kap = p['vhalfn_kap']\n # sodium reversal potential \n sec.ena = p['ena'] \n # potassium reversal potential \n sec.ek = p['ek']\n sec.Ra = p['RaAx']\n\n\n for seg_i, seg in enumerate(sec):\n self.syns[tree_key][sec_i].append({})\n \n # soma active biophysics\n elif tree_key == 'soma':\n\n # voltage gated sodium\n sec.insert('na3')\n sec.gbar_na3 = p['gna']*p['SOMAM']\n sec.ar_na3 = p['gna_inact']\n # print 'soma sodium conductance:', sec.gbar_na3*10000\n # h-current \n sec.insert('hd')\n sec.ghdbar_hd = p['ghd'] \n sec.vhalfl_hd = p['vhalfl_hd_prox']\n sec.kl_hd = p['kl_hd']\n sec.ehd_hd = p['ehd'] \n\n\n # delayed rectifier potassium \n sec.insert('kdr')\n sec.gkdrbar_kdr = p['gkdr'] \n # a-type potassium \n sec.insert('kap')\n sec.gkabar_kap = p['KMULTP']\n sec.vhalfl_kap = p['vhalfl_kap']\n sec.vhalfn_kap = p['vhalfn_kap']\n\n sec.insert('calH')\n sec.gcalbar_calH = p['gcalbar']\n # sodium reversal potential \n sec.ena = p['ena'] \n # potassium reversal potential \n sec.ek = p['ek'] \n\n for seg_i,seg in enumerate(sec):\n self.syns[tree_key][sec_i].append({})\n\n \n # dendrites active biophysics\n elif ((tree_key == 'basal') or \n (tree_key == 'apical_trunk') or \n (tree_key == 'apical_tuft')):\n # h-current\n sec.insert('hd')\n sec.ghdbar_hd = p['ghd']\n sec.kl_hd = p['kl_hd']\n sec.ehd_hd = p['ehd']\n \n # voltage gated sodium \n sec.insert('na3')\n sec.gbar_na3 = p['gna']\n sec.ar_na3 = p['gna_inact']\n\n # delayed rectifier potassium \n sec.insert('kdr')\n sec.gkdrbar_kdr = p['gkdr'] \n # a-type potassium proximal\n sec.insert('kap')\n sec.gkabar_kap = 0 \n # a-type potassium distal 
\n sec.insert('kad')\n sec.gkabar_kad = 0 \n\n # L-type calcium channel\n sec.insert('calH')\n sec.gcalbar_calH = p['gcalbar']\n\n # sodium reversal potential \n sec.ena = p['ena']\n # potassium reversal potential\n sec.ek = p['ek'] \n\n # mechanisms that vary with distance from soma\n # loop over segments\n for seg_i,seg in enumerate(sec):\n \n # print seg_i\n self.syns[tree_key][sec_i].append({'ampa':[],\n 'nmda':[],\n 'clopath':[]})\n\n for syn_key,syn in self.syns[tree_key][sec_i][seg_i].iteritems():\n \n if syn_key is 'ampa':\n \n # adapting exponential synapse based on model in Varela et al. 1997\n self.syns[tree_key][sec_i][seg_i][syn_key] = h.FDSExp2Syn_D3(sec(seg.x))\n self.syns[tree_key][sec_i][seg_i][syn_key].f = p['f_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].tau_F = p['tau_F_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].d1 = p['d1_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].tau_D1 = p['tau_D1_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].d2 = p['d2_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].tau_D2 = p['tau_D2_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].d3 = p['d3_ampa']\n self.syns[tree_key][sec_i][seg_i][syn_key].tau_D3 = p['tau_D3_ampa']\n\n # regular double exponential synapse\n # self.syns[tree_key][sec_i][seg_i][syn_key] = h.Exp2Syn(sec(seg.x))\n # self.syns[tree_key][sec_i][seg_i][syn_key].tau1 = p['tau1_ampa']\n # self.syns[tree_key][sec_i][seg_i][syn_key].tau2 = p['tau2_ampa']\n # self.syns[tree_key][sec_i][seg_i][syn_key].i = p['i_ampa']\n # print syn\n\n elif syn_key is 'nmda':\n # print syn_key\n self.syns[tree_key][sec_i][seg_i][syn_key]= h.Exp2SynNMDA(sec(seg.x))\n self.syns[tree_key][sec_i][seg_i][syn_key].tau1 = p['tau1_nmda']\n self.syns[tree_key][sec_i][seg_i][syn_key].tau2 = p['tau2_nmda']\n # print syn\n\n elif syn_key is 'clopath':\n # print syn_key\n self.syns[tree_key][sec_i][seg_i][syn_key] = h.STDPSynCCNon(sec(seg.x))\n\n # distance from soma\n seg_dist = h.distance(seg.x,sec=sec)\n \n # sodium\n if abs(p['dgna']*seg_dist)<p['gna']:\n seg.gbar_na3 = p['gna'] + p['dgna']*seg_dist\n else:\n seg.gbar_na3 = 0.\n \n # h current\n if seg_dist < p['ghd_cutoff_distance']:\n seg.ghdbar_hd = p['ghd']*(1+p['ghd_grad']*seg_dist/100.)\n else:\n seg.ghdbar_hd = p['ghd']*(1+p['ghd_grad']*p['ghd_cutoff_distance']/100.)\n\n \n # A-type potassium\n if seg_dist > 100.: # distal\n seg.vhalfl_hd = p['vhalfl_hd_dist']\n seg.vhalfl_kad = p['vhalfl_kad']\n seg.vhalfn_kad = p['vhalfn_kad']\n if seg_dist < p['ka_cutoff_distance']:\n seg.gkabar_kad = p['KMULT']*(1+p['ka_grad']*seg_dist/100.)\n else:\n seg.gkabar_kad = p['KMULT']*(1+p['ka_grad']*p['ka_cutoff_distance']/100.)\n else: # proximal\n seg.vhalfl_hd = p['vhalfl_hd_prox']\n seg.vhalfl_kap = p['vhalfl_kap']\n seg.vhalfn_kap = p['vhalfn_kap']\n seg.gkabar_kap = p['KMULTP']*(1+p['ka_grad']*seg_dist/100.)\n\n # print tree_key, sec_i, seg_i, dir(seg.calH) ", "def calculate_molecular_contents(self, scale, toler, covalent_radii):\n # Calculate the contents of all the cells adjacent to the central cell\n adjacents = ( 0, -1, 1 )\n translations = [ (i, j, k) for i in adjacents for j in adjacents for k in adjacents ]\n fractional_supercell = []\n index_supercell = []\n for tr in translations:\n for l,a in enumerate(self.fractional_coordinates):\n i,j,k = tr\n index_supercell.append( l )\n new_position = [ (xyz1 + xyz2) for xyz1, xyz2 in zip(a, tr) ]\n fractional_supercell.append( new_position )\n #jk print('New positions ',new_position,l,i,j,k)\n # Convert fractional supercell 
coordinates to xyz\n # xyz_supercell will be an np array\n xyz_supercell = np.empty_like(fractional_supercell)\n for i,abc in enumerate(fractional_supercell):\n xyz_supercell[i] = self.convert_abc_to_xyz(abc)\n # put the atoms into boxes of boxSize\n BoxAtoms = {}\n BoxNeighbours = {}\n boxSize = 0.0\n # calculate boxsize\n rmax = 0.0\n for el in self.element_names:\n #jk print(\"Element name\",el)\n if covalent_radii[el] > rmax:\n rmax = covalent_radii[el]\n boxSize = 2.0*scale*rmax + 0.5 + toler\n #jk print('rmax = ',rmax)\n # Put atoms into boxes and store the box info in Atom_box_id\n Atom_box_id = []\n for i,xyz in enumerate(xyz_supercell):\n a = int( math.floor(xyz[0]/boxSize) )\n b = int( math.floor(xyz[1]/boxSize) )\n c = int( math.floor(xyz[2]/boxSize) )\n abc = (a,b,c)\n Atom_box_id.append(abc)\n try:\n BoxAtoms[abc].append(i)\n except:\n BoxAtoms[abc] = [i]\n # Calculate the neighbouring boxes for each occupied box\n for abc in BoxAtoms:\n #jk print('Box ',abc, BoxAtoms[abc])\n a,b,c = abc\n BoxNeighbours[abc] = []\n for i in [ -1, 0, 1]:\n for j in [ -1, 0, 1]:\n for k in [ -1, 0, 1]:\n BoxNeighbours[abc].append( (a+i,b+j,c+k) )\n # end for abc in Box1\n \n # Calculate the bonding the supercell \n bondedToAtom = {}\n for i,xyzi in enumerate(xyz_supercell):\n #jk print('Calculating bonding to ',i,xyzi)\n bondedToAtom[i] = []\n # Find the element name for this atom in the supercell\n ip = index_supercell[i]\n i_el = self.element_names[ip]\n # lookup all of the boxes that might hold a bonding atom\n cell = Atom_box_id[i]\n for abc in BoxNeighbours[cell]:\n # lookup all the atoms that are in that cell\n try: \n for j in BoxAtoms[abc]:\n if j < i:\n # Find the element name for this atom in the supercell\n jp = index_supercell[j]\n j_el = self.element_names[jp]\n dist1 = scale*( covalent_radii[i_el] + covalent_radii[j_el]) + toler\n dist2 = calculate_distance(xyzi,xyz_supercell[j])\n if dist2 < dist1:\n bondedToAtom[i].append(j)\n bondedToAtom[j].append(i)\n #jk print('new bond', i, j, i_el, j_el, xyzi, xyz_supercell[j], dist1, dist2)\n # end if dist2 < dist1\n # end if j < i\n # end for j\n except KeyError: \n pass\n # end for abc \n # end for i,a\n # \n # Now we have to find how many molecules we have in the cell\n # There must be at least one molecule in the cell and it must contain the first atom\n #\n # BelongsToMolecule[i] holds which molecule the ith atom belongs to\n belongsToMolecule = {}\n # molecules is a dictionary of molecules each entry is a is a list of atoms\n molecules = {}\n molID = -1\n # We stop when all the atoms in the original cell belong to a molecule\n remainingAtoms = [ atom for atom in range(self.nions) ]\n bonds = []\n while len(remainingAtoms) > 0:\n #jk print(\"Remaining atoms\")\n #jk print(remainingAtoms)\n # create a new molecule from the first atom which has no molecule assigned to it\n molID += 1\n useAtom = remainingAtoms[0]\n belongsToMolecule[useAtom] = molID\n molecules[molID] = [ useAtom ]\n remainingAtoms.remove(useAtom)\n # Now using this new molecule with its only atom as a seed find any atoms connected to it\n # We need to continue searching for bonded atoms until there are no more to be found\n moreAtomsToBeFound = True\n while moreAtomsToBeFound:\n moreAtomsToBeFound = False\n for i in range(len(xyz_supercell)):\n # has this atom been assigned a molecule yet?\n if i in belongsToMolecule:\n # atom i is already assigned to a molecule\n useThisMolecule = belongsToMolecule[i]\n #jk print(\"Using this molecule\", useThisMolecule)\n # Go 
through all the atoms bonded to i and add to the current molecule\n for j in bondedToAtom[i]:\n jx = index_supercell[j]\n #jk print(\"atom j / jx is bonded to atom i\",j,jx,i)\n # The image of j in the original cell might not be available, and j might be bonded\n if jx in remainingAtoms and not j in belongsToMolecule:\n # if j was not already in a molecule then we have new information\n moreAtomsToBeFound = True\n molecules[useThisMolecule].append(j)\n belongsToMolecule[j] = useThisMolecule\n #jk print(\"Removing atom index(j) from remaining atoms\",index_supercell[j])\n remainingAtoms.remove(jx)\n # The j'th atom could be already specified and we have a ring....\n # We also need to make sure that we have unique bonds\n if j in belongsToMolecule:\n if i > j and (i,j) not in bonds:\n bonds.append( (i,j) )\n elif i < j and (j,i) not in bonds:\n bonds.append( (j,i) )\n # end for j\n # end if i in\n # end for i\n # while moreAtomsToBeFound\n # until all the atoms belong to a molecule\n #jk print('Number of molecules', molID+1)\n self.centres_of_mass = []\n self.total_mass = 0.0\n for mol_index in molecules:\n #jk print('Molecule ',mol_index)\n #jk print('Atoms ',molecules[mol_index])\n for atom_index in molecules[mol_index]:\n index = index_supercell[atom_index]\n #jk print('New atom index, old index', atom_index, index, self.element_names[index])\n # Calculate centre of mass\n mass = 0.0\n cm = np.zeros(3)\n for atom_index in molecules[mol_index]:\n index = index_supercell[atom_index]\n mass += self.atomic_masses[index]\n cm = cm + self.atomic_masses[index] * xyz_supercell[atom_index]\n cm_xyz = cm / mass\n cm_fractional = self.convert_xyz_to_abc(cm_xyz)\n self.centres_of_mass.append( cm_fractional )\n #jk print('Mass', mass)\n #jk print('Centre of mass', cm_fractional)\n # Create a new unit cell with the atoms shifted so that whole molecules are ordered and within the cell\n new_molecules = []\n new_fractional = np.empty_like(self.fractional_coordinates)\n new_element_names = []\n new_index = 0\n new_masses = []\n old_order = []\n for mol_index in molecules:\n new_atom_index = []\n cm = self.centres_of_mass[mol_index]\n shift = np.array( [ 0.0, 0.0, 0.0] )\n if cm[0] < 0.0:\n shift += [ 1.0, 0.0, 0.0 ]\n elif cm[0] > 1.0:\n shift += [-1.0, 0.0, 0.0 ]\n if cm[1] < 0.0:\n shift += [ 0.0, 1.0, 0.0 ]\n elif cm[1] > 1.0:\n shift += [ 0.0,-1.0, 0.0 ]\n if cm[2] < 0.0:\n shift += [ 0.0, 0.0, 1.0 ]\n elif cm[2] > 1.0:\n shift += [ 0.0, 0.0,-1.0 ]\n for atom_index in molecules[mol_index]:\n old_index = index_supercell[atom_index]\n old_order.append(old_index)\n new_fractional[new_index] = shift + np.array(fractional_supercell[atom_index])\n new_element_names.append(self.element_names[old_index])\n new_masses.append(self.atomic_masses[old_index])\n new_atom_index.append(new_index)\n new_index += 1\n new_molecules.append(new_atom_index)\n # as well as being able to go from the new order and look up the old order\n # we need to be able to take the old order and look up what the new order is\n invert_old_order = np.zeros_like(old_order)\n for i,j in enumerate(old_order):\n invert_old_order[j] = i\n new_bonds = []\n for bond in bonds:\n i,j = bond\n ix = invert_old_order[index_supercell[i]]\n jx = invert_old_order[index_supercell[j]]\n new_bonds.append( (ix,jx) )\n new_unit_cell = UnitCell( self.a, self.b, self.c, self.alpha, self.beta, self.gamma )\n new_unit_cell.set_fractional_coordinates(new_fractional.tolist())\n new_unit_cell.set_element_names(new_element_names)\n 
new_unit_cell.set_atomic_masses(new_masses)\n new_unit_cell.set_molecules(new_molecules)\n new_unit_cell.set_bonds(new_bonds)\n return new_unit_cell, len(new_molecules), old_order", "def _calc_u_matrix_means(self) -> None:\n for u_node in itertools.product(\n range(self.n_rows * 2 - 1), range(self.n_columns * 2 - 1)\n ):\n if not (u_node[0] % 2) and not (u_node[1] % 2):\n # SOM nodes -> mean over 2-4 values\n\n nodelist = []\n if u_node[0] > 0:\n nodelist.append((u_node[0] - 1, u_node[1]))\n if u_node[0] < self.n_rows * 2 - 2:\n nodelist.append((u_node[0] + 1, u_node[1]))\n if u_node[1] > 0:\n nodelist.append((u_node[0], u_node[1] - 1))\n if u_node[1] < self.n_columns * 2 - 2:\n nodelist.append((u_node[0], u_node[1] + 1))\n self.u_matrix[u_node] = self._get_u_mean(nodelist)\n\n elif (u_node[0] % 2) and (u_node[1] % 2):\n # mean over four\n\n self.u_matrix[u_node] = self._get_u_mean(\n [\n (u_node[0] - 1, u_node[1]),\n (u_node[0] + 1, u_node[1]),\n (u_node[0], u_node[1] - 1),\n (u_node[0], u_node[1] + 1),\n ]\n )", "def constraint_B_k_mu_mu(self):\n ms = self.ms\n width_contr = 0.0\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in B_k_mu_mu_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mB - mk - ms) * (mB + mk - ms) * (mB - mk + ms) * (mB + mk + ms)\n ) / (2.0 * mB)\n # Probability that S decays close to the primary vertex\n pr_vis = 1.0 - np.exp(-B_k_mu_mu_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # print(pr_vis)\n # print(widths_s[\"mu mu\"] / width_s)\n\n # Compute the contribution to the mu mu decay width\n width_contr = self.width_B_k_s() * widths_s[\"mu mu\"] / width_s * pr_vis\n\n return B_k_mu_mu_obs.width_bound - width_contr", "def eg_bootmu():\n\n a = []\n b = []\n\n for _ in range(100):\n a.append(utils.gaussian(10, 1))\n\n print(\"\", \"mu\", \"sd\", \"cliffs\", \"boot\", \"both\", sep=\"\\t\")\n print(\"\", \"--\", \"--\", \"------\", \"----\", \"----\", sep=\"\\t\")\n\n for mu in range(100, 111):\n b = []\n\n for _ in range(100):\n b.append(utils.gaussian(mu / 10, 1))\n\n cl = utils.cliffsDelta(a, b)\n bs = stats.bootstrap(a, b)\n\n print(\"\", mu / 10, 1, cl, bs, cl and bs, sep=\"\\t\")", "def _map_elements3(self, nid_map, model, unused_j, unused_dim_max,\n nid_cp_cd, xref_loads=True):\n settings = self.gui.settings # type: Settings\n\n # these normals point inwards\n # 4\n # / | \\\n # / | \\\n # 3-------2\n # \\ | /\n # \\ | /\n # 1\n _ctetra_faces = (\n (0, 1, 2), # (1, 2, 3),\n (0, 3, 1), # (1, 4, 2),\n (0, 3, 2), # (1, 3, 4),\n (1, 3, 2), # (2, 4, 3),\n )\n\n # these normals point inwards\n #\n #\n #\n #\n # /4-----3\n # / /\n # / 5 /\n # / \\ /\n # / \\ /\n # 1---------2\n _cpyram_faces = (\n (0, 1, 2, 3), # (1, 2, 3, 4),\n (1, 4, 2), # (2, 5, 3),\n (2, 4, 3), # (3, 5, 4),\n (0, 3, 4), # (1, 4, 5),\n (0, 4, 1), # (1, 5, 2),\n )\n\n # these normals point inwards\n # /6\n # / | \\\n # / | \\\n # 3\\ | \\\n # | \\ /4-----5\n # | \\/ /\n # | / \\ /\n # | / \\ /\n # | / \\ /\n # 1---------2\n _cpenta_faces = (\n (0, 2, 1), # (1, 3, 2),\n (3, 4, 5), # (4, 5, 6),\n\n (0, 1, 4, 3), # (1, 2, 5, 4), # bottom\n (1, 2, 5, 4), # (2, 3, 6, 5), # right\n (0, 3, 5, 2), # (1, 4, 6, 3), # left\n )\n\n # these normals point inwards\n # 8----7\n # /| /|\n # / | / |\n # / 5-/--6\n # 4-----3 /\n # | / | /\n # | / | /\n # 1-----2\n _chexa_faces = (\n (4, 5, 6, 7), # (5, 6, 7, 8),\n (0, 3, 2, 1), # (1, 4, 3, 2),\n (1, 2, 6, 5), # (2, 
3, 7, 6),\n (2, 3, 7, 6), # (3, 4, 8, 7),\n (0, 4, 7, 3), # (1, 5, 8, 4),\n (0, 6, 5, 4), # (1, 7, 6, 5),\n )\n\n elements, nelements, unused_superelements = get_elements_nelements_unvectorized(model)\n xyz_cid0 = self.xyz_cid0\n pids_array = np.zeros(nelements, dtype='int32')\n eids_array = np.zeros(nelements, dtype='int32')\n mcid_array = np.full(nelements, -1, dtype='int32')\n material_theta_array = np.full(nelements, np.nan, dtype='float32')\n dim_array = np.full(nelements, -1, dtype='int32')\n nnodes_array = np.full(nelements, -1, dtype='int32')\n\n # quality\n min_interior_angle = np.zeros(nelements, 'float32')\n max_interior_angle = np.zeros(nelements, 'float32')\n dideal_theta = np.zeros(nelements, 'float32')\n max_skew_angle = np.zeros(nelements, 'float32')\n max_warp_angle = np.zeros(nelements, 'float32')\n max_aspect_ratio = np.zeros(nelements, 'float32')\n area = np.zeros(nelements, 'float32')\n area_ratio = np.zeros(nelements, 'float32')\n taper_ratio = np.zeros(nelements, 'float32')\n min_edge_length = np.zeros(nelements, 'float32')\n normals = np.full((nelements, 3), np.nan, 'float32')\n\n nids_list = []\n ieid = 0\n cell_offset = 0\n\n dtype = get_numpy_idtype_for_vtk()\n\n cell_types_array = np.zeros(nelements, dtype=dtype)\n cell_offsets_array = np.zeros(nelements, dtype=dtype)\n\n cell_type_point = 1 # vtk.vtkVertex().GetCellType()\n cell_type_line = 3 # vtk.vtkLine().GetCellType()\n cell_type_tri3 = 5 # vtkTriangle().GetCellType()\n cell_type_tri6 = 22 # vtkQuadraticTriangle().GetCellType()\n cell_type_quad4 = 9 # vtkQuad().GetCellType()\n #cell_type_quad8 = 23 # vtkQuadraticQuad().GetCellType()\n cell_type_tetra4 = 10 # vtkTetra().GetCellType()\n cell_type_tetra10 = 24 # vtkQuadraticTetra().GetCellType()\n cell_type_pyram5 = 14 # vtkPyramid().GetCellType()\n #cell_type_pyram13 = 27 # vtk.vtkQuadraticPyramid().GetCellType()\n cell_type_penta6 = 13 # vtkWedge().GetCellType()\n cell_type_penta15 = 26 # vtkQuadraticWedge().GetCellType()\n cell_type_hexa8 = 12 # vtkHexahedron().GetCellType()\n cell_type_hexa20 = 25 # vtkQuadraticHexahedron().GetCellType()\n\n # per gui/testing_methods.py/create_vtk_cells_of_constant_element_type\n #1 = vtk.vtkVertex().GetCellType()\n #3 = vtkLine().GetCellType()\n #5 = vtkTriangle().GetCellType()\n #9 = vtk.vtkQuad().GetCellType()\n #10 = vtkTetra().GetCellType()\n #vtkPenta().GetCellType()\n #vtkHexa().GetCellType()\n #vtkPyram().GetCellType()\n\n skipped_etypes = set()\n all_nids = nid_cp_cd[:, 0]\n ieid = 0\n for eid, elem in sorted(elements.items()):\n if ieid % 5000 == 0 and ieid > 0:\n print(' map_elements = %i' % ieid)\n etype = elem.type\n nnodes = None\n nids = None\n pid = None\n cell_type = None\n inids = None\n\n dideal_thetai = np.nan\n min_thetai = np.nan\n max_thetai = np.nan\n #max_thetai = np.nan\n max_skew = np.nan\n max_warp = np.nan\n aspect_ratio = np.nan\n areai = np.nan\n area_ratioi = np.nan\n taper_ratioi = np.nan\n min_edge_lengthi = np.nan\n normali = np.nan\n if etype in ['CTRIA3', 'CTRIAR', 'CTRAX3', 'CPLSTN3', 'CPLSTS3']:\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_tri3 # 5\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3 = xyz_cid0[inids, :]\n out = tri_quality(p1, p2, p3)\n (areai, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out\n normali = np.cross(p1 - p2, p1 - p3)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 3\n dim = 2\n\n elif etype in 
{'CQUAD4', 'CQUADR', 'CPLSTN4', 'CPLSTS4', 'CQUADX4',\n 'CQUAD1'}: # nastran95\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_quad4 #9\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 4\n dim = 2\n\n elif etype in ['CTRIA6']:\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tri3\n inids = np.searchsorted(all_nids, nids[:3])\n nids = nids[:3]\n p1, p2, p3 = xyz_cid0[inids, :]\n nnodes = 3\n else:\n cell_type = cell_type_tri6\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4, unused_p5, unused_p6 = xyz_cid0[inids, :]\n nnodes = 6\n out = tri_quality(p1, p2, p3)\n (areai, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out\n normali = np.cross(p1 - p2, p1 - p3)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n dim = 2\n elif etype == 'CQUAD8':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tri3\n inids = np.searchsorted(all_nids, nids[:4])\n nids = nids[:4]\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n nnodes = 4\n else:\n cell_type = cell_type_tri6\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids[:4], :]\n nnodes = 8\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n if isinstance(elem.theta_mcid, float):\n material_theta_array[ieid] = elem.theta_mcid\n else:\n mcid_array[ieid] = elem.theta_mcid\n nnodes = 4\n dim = 2\n\n elif etype == 'CSHEAR':\n nids = elem.nodes\n pid = elem.pid\n cell_type = cell_type_quad4 #9\n inids = np.searchsorted(all_nids, nids)\n p1, p2, p3, p4 = xyz_cid0[inids, :]\n out = quad_quality(elem, p1, p2, p3, p4)\n (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out\n normali = np.cross(p1 - p3, p2 - p4)\n nnodes = 4\n dim = 2\n\n elif etype == 'CTETRA':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_tetra4\n nids = nids[:4]\n nnodes = 4\n else:\n cell_type = cell_type_tetra10\n nnodes = 10\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _ctetra_faces, nids, nid_map, xyz_cid0)\n dim = 3\n\n elif etype == 'CHEXA':\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_hexa8\n nids = nids[:8]\n nnodes = 8\n else:\n cell_type = cell_type_hexa20\n nnodes = 20\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _chexa_faces, nids, nid_map, xyz_cid0)\n dim = 3\n\n elif etype == 'CPENTA':\n nids = elem.nodes\n pid = elem.pid\n\n if None in nids:\n cell_type = cell_type_penta6\n nids = nids[:6]\n nnodes = 6\n else:\n cell_type = cell_type_penta15\n nnodes = 15\n\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _cpenta_faces, nids, nid_map, 
xyz_cid0)\n dim = 3\n elif etype == 'CPYRAM':\n # TODO: assuming 5\n nids = elem.nodes\n pid = elem.pid\n if None in nids:\n cell_type = cell_type_pyram5\n nids = nids[:5]\n nnodes = 5\n else:\n cell_type = cell_type_penta15\n nnodes = 15\n inids = np.searchsorted(all_nids, nids)\n min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n _cpyram_faces, nids, nid_map, xyz_cid0)\n dim = 3\n elif etype in ['CELAS2', 'CELAS4', 'CDAMP4']:\n # these can have empty nodes and have no property\n # CELAS1: 1/2 GRID/SPOINT and pid\n # CELAS2: 1/2 GRID/SPOINT, k, ge, and s\n # CELAS3: 1/2 SPOINT and pid\n # CELAS4: 1/2 SPOINT and k\n nids = elem.nodes\n assert nids[0] != nids[1]\n if None in nids:\n assert nids[0] is not None, nids\n assert nids[1] is None, nids\n nids = [nids[0]]\n cell_type = cell_type_point\n nnodes = 1\n else:\n nids = elem.nodes\n assert nids[0] != nids[1]\n cell_type = cell_type_line\n nnodes = 2\n inids = np.searchsorted(all_nids, nids)\n pid = 0\n dim = 0\n elif etype in ['CBUSH', 'CBUSH1D', 'CBUSH2D',\n 'CELAS1', 'CELAS3',\n 'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP5',\n 'CFAST', 'CGAP', 'CVISC']:\n nids = elem.nodes\n assert nids[0] != nids[1]\n assert None not in nids, 'nids=%s\\n%s' % (nids, elem)\n pid = elem.pid\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n nnodes = 2\n dim = 0\n elif etype in ['CBAR', 'CBEAM']:\n nids = elem.nodes\n pid = elem.pid\n pid_ref = model.Property(pid)\n areai = pid_ref.Area()\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n elif etype in ['CROD', 'CTUBE']:\n nids = elem.nodes\n pid = elem.pid\n pid_ref = model.Property(pid)\n areai = pid_ref.Area()\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n elif etype == 'CONROD':\n nids = elem.nodes\n areai = elem.Area()\n pid = 0\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = 2\n dim = 1\n #------------------------------\n # rare\n #elif etype == 'CIHEX1':\n #nids = elem.nodes\n #pid = elem.pid\n #cell_type = cell_type_hexa8\n #inids = np.searchsorted(all_nids, nids)\n #min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(\n #_chexa_faces, nids, nid_map, xyz_cid0)\n #nnodes = 8\n #dim = 3\n elif etype == 'CHBDYE':\n #self.eid_map[eid] = ieid\n eid_solid = elem.eid2\n side = elem.side\n element_solid = model.elements[eid_solid]\n\n mapped_inids = SIDE_MAP[element_solid.type][side]\n side_inids = [nid - 1 for nid in mapped_inids]\n nodes = element_solid.node_ids\n\n pid = 0\n nnodes = len(side_inids)\n nids = [nodes[inid] for inid in side_inids]\n inids = np.searchsorted(all_nids, nids)\n\n if len(side_inids) == 4:\n cell_type = cell_type_quad4\n else:\n msg = 'element_solid:\\n%s' % (str(element_solid))\n msg += 'mapped_inids = %s\\n' % mapped_inids\n msg += 'side_inids = %s\\n' % side_inids\n msg += 'nodes = %s\\n' % nodes\n #msg += 'side_nodes = %s\\n' % side_nodes\n raise NotImplementedError(msg)\n elif etype == 'GENEL':\n nids = []\n if len(elem.ul_nodes):\n nids.append(elem.ul_nodes)\n if len(elem.ud_nodes):\n nids.append(elem.ud_nodes)\n nids = np.unique(np.hstack(nids))\n #print(elem.get_stats())\n nids = nids[:2]\n\n areai = np.nan\n pid = 0\n cell_type = cell_type_line\n inids = np.searchsorted(all_nids, 
nids)\n p1, p2 = xyz_cid0[inids, :]\n min_edge_lengthi = norm(p2 - p1)\n nnodes = len(nids)\n dim = 1\n else:\n #raise NotImplementedError(elem)\n skipped_etypes.add(etype)\n nelements -= 1\n continue\n #for nid in nids:\n #assert isinstance(nid, integer_types), 'not an integer. nids=%s\\n%s' % (nids, elem)\n #assert nid != 0, 'not a positive integer. nids=%s\\n%s' % (nids, elem)\n\n assert inids is not None\n if not np.array_equal(all_nids[inids], nids):\n msg = 'all_nids[inids]=%s nids=%s\\n%s' % (all_nids[inids], nids, elem)\n raise RuntimeError(msg)\n\n assert cell_type is not None\n assert cell_offset is not None\n assert eid is not None\n assert pid is not None\n assert dim is not None\n assert nnodes is not None\n nids_list.append(nnodes)\n nids_list.extend(inids)\n normals[ieid] = normali\n eids_array[ieid] = eid\n pids_array[ieid] = pid\n dim_array[ieid] = dim\n cell_types_array[ieid] = cell_type\n cell_offsets_array[ieid] = cell_offset # I assume the problem is here\n cell_offset += nnodes + 1\n self.eid_map[eid] = ieid\n\n min_interior_angle[ieid] = min_thetai\n max_interior_angle[ieid] = max_thetai\n dideal_theta[ieid] = dideal_thetai\n max_skew_angle[ieid] = max_skew\n max_warp_angle[ieid] = max_warp\n max_aspect_ratio[ieid] = aspect_ratio\n area[ieid] = areai\n area_ratio[ieid] = area_ratioi\n taper_ratio[ieid] = taper_ratioi\n min_edge_length[ieid] = min_edge_lengthi\n ieid += 1\n\n #print('self.eid_map =', self.eid_map)\n\n icells_zero = np.where(cell_types_array == 0)[0]\n # TODO: I'd like to get rid of deep=1, but it'll crash the edges\n deep = 1\n if len(icells_zero):\n icells = np.where(cell_types_array != 0)[0]\n if len(icells) == 0:\n self.log.error('skipped_etypes = %s' % skipped_etypes)\n raise RuntimeError('there are no elements...')\n eids_array = eids_array[icells]\n pids_array = pids_array[icells]\n #dim_array = pids_array[dim_array]\n cell_types_array = cell_types_array[icells]\n cell_offsets_array = cell_offsets_array[icells]\n nnodes_array = nnodes_array[icells]\n normals = normals[icells, :]\n #deep = 1\n #print('deep = %s' % deep)\n if skipped_etypes:\n self.log.error('skipped_etypes = %s' % list(skipped_etypes))\n #print('skipped_etypes = %s' % skipped_etypes)\n if len(pids_array) != nelements:\n msg = 'nelements=%s len(pids_array)=%s' % (nelements, len(pids_array))\n raise RuntimeError(msg)\n if len(cell_offsets_array) != nelements:\n msg = 'nelements=%s len(cell_offsets_array)=%s' % (nelements, len(cell_offsets_array))\n raise RuntimeError(msg)\n\n nids_array = np.array(nids_list, dtype=dtype)\n\n #-----------------------------------------------------------------\n # saving some data members\n self.element_ids = eids_array\n\n #print('cell_types_array* = ', cell_types_array.tolist())\n #print('cell_offsets_array* = ', cell_offsets_array.tolist())\n\n #-----------------------------------------------------------------\n # build the grid\n\n #self.log.info('nids_array = %s' % nids_array)\n #self.log.info('cell_offsets_array = %s' % cell_offsets_array)\n #self.log.info('cell_types_array = %s' % cell_types_array)\n\n # Create the array of cells\n cells_id_type = numpy_to_vtkIdTypeArray(nids_array, deep=1)\n vtk_cells = vtk.vtkCellArray()\n vtk_cells.SetCells(nelements, cells_id_type)\n\n # Cell types\n vtk_cell_types = numpy_to_vtk(\n cell_types_array, deep=deep,\n array_type=vtk.vtkUnsignedCharArray().GetDataType())\n\n vtk_cell_offsets = numpy_to_vtk(cell_offsets_array, deep=deep,\n array_type=vtk.VTK_ID_TYPE)\n\n grid = self.grid\n #grid = 
vtk.vtkUnstructuredGrid()\n grid.SetCells(vtk_cell_types, vtk_cell_offsets, vtk_cells)\n\n #-----------------------------------------------------------------\n # fill the results\n nid_to_pid_map = None\n self.isubcase_name_map = {1: ['Nastran', '']}\n icase = 0\n cases = OrderedDict()\n form = ['Geometry', None, []]\n form0 = form[2]\n\n subcase_id = 0\n\n #nids_set = True\n #if nids_set:\n # this intentionally makes a deepcopy\n #nids = np.array(nid_cp_cd[:, 0])\n\n # this intentionally makes a deepcopy\n cds = np.array(nid_cp_cd[:, 2])\n colormap = settings.colormap\n nid_res = GuiResult(subcase_id, 'NodeID', 'NodeID', 'node', all_nids,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (nid_res, (0, 'Node ID'))\n form0.append(('Node ID', icase, []))\n icase += 1\n\n if cds.max() > 0:\n cd_res = GuiResult(0, header='NodeCd', title='NodeCd',\n location='node', scalar=cds)\n cases[icase] = (cd_res, (0, 'NodeCd'))\n form0.append(('NodeCd', icase, []))\n icase += 1\n\n eid_res = GuiResult(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (eid_res, (0, 'ElementID'))\n form0.append(('ElementID', icase, []))\n icase += 1\n\n is_element_dim = True\n #if len(np.unique(dim_array)) > 1:\n #dim_res = GuiResult(subcase_id, 'ElementDim', 'ElementDim', 'centroid', dim_array,\n #mask_value=-1,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (dim_res, (0, 'ElementDim'))\n #form0.append(('ElementDim', icase, []))\n #icase += 1\n\n if nnodes_array.max() > -1:\n nnodes_res = GuiResult(subcase_id, 'NNodes/Elem', 'NNodes/Elem',\n 'centroid', nnodes_array,\n mask_value=0,\n nlabels=None,\n labelsize=None,\n ncolors=None,\n colormap=colormap,\n data_format=None,\n uname='GuiResult')\n cases[icase] = (nnodes_res, (0, 'NNodes/Elem'))\n form0.append(('NNodes/Elem', icase, []))\n icase += 1\n\n #pid_res = GuiResult(subcase_id, 'PropertyID', 'PropertyID', 'centroid', pids_array,\n #mask_value=0,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (pid_res, (0, 'PropertyID'))\n #form0.append(('PropertyID', icase, []))\n #icase += 1\n\n if len(model.properties) and nelements and settings.nastran_is_properties:\n icase, upids, pcomp, pshell, is_pshell_pcomp = self._build_properties(\n model, nelements, eids_array, pids_array, cases, form0, icase)\n icase = _build_materials(model, pcomp, pshell, is_pshell_pcomp,\n cases, form0, icase)\n try:\n icase = _build_optimization(model, pids_array, upids,\n nelements, cases, form0, icase)\n except Exception:\n #raise\n s = StringIO()\n traceback.print_exc(file=s)\n sout = s.getvalue()\n self.gui.log_error(sout)\n print(sout)\n\n #if isgreater_int(mcid_array, -1):\n #mcid_res = GuiResult(subcase_id, 'Material Coordinate System', 'MaterialCoord',\n #'centroid', mcid_array,\n #mask_value=-1,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (mcid_res, (0, 'Material Coordinate System'))\n #form0.append(('Material Coordinate System', icase, []))\n #icase += 1\n\n #if np.isfinite(theta_array).any():\n #print('np.nanmax(theta_array) =', np.nanmax(theta_array))\n #theta_res = 
GuiResult(subcase_id, 'Theta', 'Theta', 'centroid', theta_array,\n #mask_value=None,\n #nlabels=None,\n #labelsize=None,\n #ncolors=None,\n #colormap=colormap,\n #data_format=None,\n #uname='GuiResult')\n #cases[icase] = (theta_res, (0, 'Theta'))\n #form0.append(('Theta', icase, []))\n #icase += 1\n\n normal_mag = underflow_norm(normals, axis=1)\n assert len(normal_mag) == nelements\n normals /= normal_mag.reshape(nelements, 1)\n i_not_nan = np.isnan(normal_mag)\n\n #if self.make_offset_normals_dim and nelements:\n #material_coord = None\n #icase, normals = _build_normals_quality(\n #model, self.gui.eid_map, nelements, cases, form0, icase,\n #xyz_cid0, material_coord, material_theta,\n #min_interior_angle, max_interior_angle, dideal_theta,\n #area, max_skew_angle, taper_ratio,\n #max_warp_angle, area_ratio, min_edge_length, max_aspect_ratio,\n #make_offset_normals_dim=self.make_offset_normals_dim)\n #self.normals = normals\n\n #----------------------------------------------------------\n\n is_shell = False\n if False in i_not_nan:\n #max_normal = np.nanmax(normal_mag[i_not_nan])\n #is_shell = np.abs(max_normal) > 0.\n is_shell = True\n is_solid = isfinite_and_nonzero(max_interior_angle)\n #print('is_shell=%s is_solid=%s' % (is_shell, is_solid))\n if is_shell:\n nx_res = GuiResult(\n 0, header='NormalX', title='NormalX',\n location='centroid', scalar=normals[:, 0], data_format='%.2f')\n ny_res = GuiResult(\n 0, header='NormalY', title='NormalY',\n location='centroid', scalar=normals[:, 1], data_format='%.2f')\n nz_res = GuiResult(\n 0, header='NormalZ', title='NormalZ',\n location='centroid', scalar=normals[:, 2], data_format='%.2f')\n nxyz_res = NormalResult(0, 'Normals', 'Normals',\n nlabels=2, labelsize=5, ncolors=2,\n colormap=colormap, data_format='%.1f',\n uname='NormalResult')\n\n\n area_res = GuiResult(0, header='Area', title='Area',\n location='centroid', scalar=area)\n min_edge_length_res = GuiResult(\n 0, header='Min Edge Length', title='Min Edge Length',\n location='centroid', scalar=min_edge_length)\n\n min_theta_res = GuiResult(\n 0, header='Min Interior Angle', title='Min Interior Angle',\n location='centroid', scalar=np.degrees(min_interior_angle))\n max_theta_res = GuiResult(\n 0, header='Max Interior Angle', title='Max Interior Angle',\n location='centroid', scalar=np.degrees(max_interior_angle))\n dideal_theta_res = GuiResult(\n 0, header='Delta Ideal Angle', title='Delta Ideal Angle',\n location='centroid', scalar=np.degrees(dideal_theta))\n\n skew = np.degrees(max_skew_angle)\n skew_res = GuiResult(\n 0, header='Max Skew Angle', title='MaxSkewAngle',\n location='centroid', scalar=skew)\n aspect_res = GuiResult(\n 0, header='Aspect Ratio', title='AspectRatio',\n location='centroid', scalar=max_aspect_ratio)\n\n form_checks = []\n form0.append(('Element Checks', None, form_checks))\n if is_element_dim:\n form_checks.append(('ElementDim', icase, []))\n\n if self.make_offset_normals_dim and self.make_nnodes_result and 0: # pragma: no cover\n nnodes_res = GuiResult(\n 0, header='NNodes/Elem', title='NNodes/Elem',\n location='centroid', scalar=nnodes_array)\n form_checks.append(('NNodes', icase + 1, []))\n cases[icase + 1] = (nnodes_res, (0, 'NNodes'))\n icase += 1\n\n if self.make_offset_normals_dim or 1:\n cases[icase + 1] = (nx_res, (0, 'NormalX'))\n cases[icase + 2] = (ny_res, (0, 'NormalY'))\n cases[icase + 3] = (nz_res, (0, 'NormalZ'))\n cases[icase + 4] = (nxyz_res, (0, 'Normal'))\n\n form_checks.append(('NormalX', icase + 1, []))\n form_checks.append(('NormalY', icase 
+ 2, []))\n form_checks.append(('NormalZ', icase + 3, []))\n form_checks.append(('Normal', icase + 4, []))\n\n cases[icase + 5] = (area_res, (0, 'Area'))\n cases[icase + 6] = (min_edge_length_res, (0, 'Min Edge Length'))\n cases[icase + 7] = (min_theta_res, (0, 'Min Interior Angle'))\n cases[icase + 8] = (max_theta_res, (0, 'Max Interior Angle'))\n cases[icase + 9] = (dideal_theta_res, (0, 'Delta Ideal Angle'))\n cases[icase + 10] = (skew_res, (0, 'Max Skew Angle'))\n cases[icase + 11] = (aspect_res, (0, 'Aspect Ratio'))\n\n form_checks.append(('Area', icase + 5, []))\n form_checks.append(('Min Edge Length', icase + 6, []))\n form_checks.append(('Min Interior Angle', icase + 7, []))\n form_checks.append(('Max Interior Angle', icase + 8, []))\n form_checks.append(('Delta Ideal Angle', icase + 9, []))\n form_checks.append(('Max Skew Angle', icase + 10, []))\n form_checks.append(('Aspect Ratio', icase + 11, []))\n icase += 12\n\n if np.any(np.isfinite(area_ratio)) and np.nanmax(area_ratio) > 1.:\n arearatio_res = GuiResult(\n 0, header='Area Ratio', title='Area Ratio',\n location='centroid', scalar=area_ratio)\n cases[icase] = (arearatio_res, (0, 'Area Ratio'))\n form_checks.append(('Area Ratio', icase, []))\n icase += 1\n\n if np.any(np.isfinite(taper_ratio)) and np.nanmax(taper_ratio) > 1.:\n taperratio_res = GuiResult(\n 0, header='Taper Ratio', title='Taper Ratio',\n location='centroid', scalar=taper_ratio)\n cases[icase] = (taperratio_res, (0, 'Taper Ratio'))\n form_checks.append(('Taper Ratio', icase, []))\n icase += 1\n\n if isfinite_and_nonzero(max_warp_angle):\n warp_res = GuiResult(\n 0, header='Max Warp Angle', title='MaxWarpAngle',\n location='centroid', scalar=np.degrees(max_warp_angle))\n cases[icase + 4] = (warp_res, (0, 'Max Warp Angle'))\n form_checks.append(('Max Warp Angle', icase, []))\n icase += 1\n\n #if (np.abs(xoffset).max() > 0.0 or np.abs(yoffset).max() > 0.0 or\n #np.abs(zoffset).max() > 0.0):\n # offsets\n #offset_res = GuiResult(\n #0, header='Offset', title='Offset',\n #location='centroid', scalar=offset, data_format='%g')\n #offset_x_res = GuiResult(\n #0, header='OffsetX', title='OffsetX',\n #location='centroid', scalar=xoffset, data_format='%g')\n #offset_y_res = GuiResult(\n #0, header='OffsetY', title='OffsetY',\n #location='centroid', scalar=yoffset, data_format='%g')\n #offset_z_res = GuiResult(\n #0, header='OffsetZ', title='OffsetZ',\n #location='centroid', scalar=zoffset, data_format='%g')\n\n #cases[icase] = (offset_res, (0, 'Offset'))\n #cases[icase + 1] = (offset_x_res, (0, 'OffsetX'))\n #cases[icase + 2] = (offset_y_res, (0, 'OffsetY'))\n #cases[icase + 3] = (offset_z_res, (0, 'OffsetZ'))\n\n #form_checks.append(('Offset', icase, []))\n #form_checks.append(('OffsetX', icase + 1, []))\n #form_checks.append(('OffsetY', icase + 2, []))\n #form_checks.append(('OffsetZ', icase + 3, []))\n #icase += 4\n\n if self.make_xyz or IS_TESTING:\n x_res = GuiResult(\n 0, header='X', title='X',\n location='node', scalar=xyz_cid0[:, 0], data_format='%g')\n y_res = GuiResult(\n 0, header='Y', title='Y',\n location='node', scalar=xyz_cid0[:, 1], data_format='%g')\n z_res = GuiResult(\n 0, header='Z', title='Z',\n location='node', scalar=xyz_cid0[:, 2], data_format='%g')\n cases[icase] = (x_res, (0, 'X'))\n cases[icase + 1] = (y_res, (0, 'Y'))\n cases[icase + 2] = (z_res, (0, 'Z'))\n form_checks.append(('X', icase + 0, []))\n form_checks.append(('Y', icase + 1, []))\n form_checks.append(('Z', icase + 2, []))\n icase += 3\n\n elif is_solid:\n # only solid elements\n 
form_checks = []\n form0.append(('Element Checks', None, form_checks))\n\n min_edge_length_res = GuiResult(\n 0, header='Min Edge Length', title='Min Edge Length',\n location='centroid', scalar=min_edge_length)\n min_theta_res = GuiResult(\n 0, header='Min Interior Angle', title='Min Interior Angle',\n location='centroid', scalar=np.degrees(min_interior_angle))\n max_theta_res = GuiResult(\n 0, header='Max Interior Angle', title='Max Interior Angle',\n location='centroid', scalar=np.degrees(max_interior_angle))\n skew = 90. - np.degrees(max_skew_angle)\n #skew_res = GuiResult(0, header='Max Skew Angle', title='MaxSkewAngle',\n #location='centroid', scalar=skew)\n if is_element_dim:\n form_checks.append(('ElementDim', icase, []))\n form_checks.append(('Min Edge Length', icase + 1, []))\n form_checks.append(('Min Interior Angle', icase + 2, []))\n form_checks.append(('Max Interior Angle', icase + 3, []))\n form_checks.append(('Max Skew Angle', icase + 4, []))\n cases[icase + 1] = (min_edge_length_res, (0, 'Min Edge Length'))\n cases[icase + 2] = (min_theta_res, (0, 'Min Interior Angle'))\n cases[icase + 3] = (max_theta_res, (0, 'Max Interior Angle'))\n #cases[icase + 4] = (skew_res, (0, 'Max Skew Angle'))\n icase += 4\n\n else:\n form0.append(('ElementDim', icase, []))\n icase += 1\n\n if isgreater_int(mcid_array, -1):\n material_coord_res = GuiResult(\n 0, header='MaterialCoord', title='MaterialCoord',\n location='centroid',\n scalar=mcid_array, mask_value=-1, data_format='%i')\n cases[icase] = (material_coord_res, (0, 'MaterialCoord'))\n form0.append(('MaterialCoord', icase, []))\n icase += 1\n if isfinite(material_theta_array):\n material_theta_res = GuiResult(\n 0, header='MaterialTheta', title='MaterialTheta',\n location='centroid',\n scalar=material_theta_array, data_format='%.3f')\n cases[icase] = (material_theta_res, (0, 'MaterialTheta'))\n form0.append(('MaterialTheta', icase, []))\n icase += 1\n\n #print(normals)\n #----------------------------------------------------------\n # finishing up vtk\n if nelements and isfinite(min_edge_length):\n mean_edge_length = np.nanmean(min_edge_length)\n self.set_glyph_scale_factor(mean_edge_length * 2.5) # was 1.5\n\n grid.Modified()\n #----------------------------------------------------------\n # finishing up parameters\n self.node_ids = all_nids\n self.normals = normals\n\n return nid_to_pid_map, icase, cases, form", "def get_effect_size(self, summ, b, nmc=5000):\n m0b, v0b = self.DModel.models[0].predict(np.array([b])) \n m1b, v1b = self.DModel.models[1].predict(np.array([b]))\n \n d_mean_D = np.squeeze(m1b - m0b) # TODO: why was this swapped around?\n d_var_D = np.squeeze(v0b + v1b)\n d_std_D = np.sqrt(d_var_D)\n \n if d_mean_D < 0:\n pval = 1 - stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n else:\n pval = stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n \n xmin, xmax = (np.min([d_mean_D - 4*d_std_D, -0.1*d_std_D]), \n np.max([d_mean_D + 4*d_std_D, 0.1*d_std_D]))\n \n n = 300\n xrange = np.linspace(xmin, xmax, n)\n y = stats.norm.pdf(xrange, d_mean_D, d_std_D) \n \n samples = np.zeros((nmc))\n nspike = int(np.round(summ['pmp']['pmc']*nmc))\n samples[nspike:] = np.random.normal(loc=d_mean_D, \n scale=np.sqrt(d_var_D), \n size=(nmc-nspike))\n \n if not np.isscalar(b):\n d_bma = None\n else:\n \n if nspike==nmc:\n # BMA dominated by continuous model\n # Put all mass at xrange closest to b\n d_bma = np.zeros((n))\n xdelta = xrange[1] - xrange[0]\n ix = np.argmin((xrange-b)**2)\n d_bma[ix] = 1.0 / xdelta\n elif nspike==0:\n # BMA dominated 
by discontinuous model\n d_bma = y\n else:\n # BMA is a mixture\n kde_fit = stats.gaussian_kde(samples, \n bw_method='silverman')\n d_bma = kde_fit(xrange)\n \n return {'es_BMA': d_bma,\n 'es_Disc': y,\n 'es_disc_stats': (d_mean_D, d_std_D),\n 'pval': pval,\n 'es_range': xrange,\n 'f(b)': (m0b, m1b),\n 'es_transform': lambda z: z*d_std_D + d_mean_D}", "def plot_initial_geometry(ni=0.0, mu=0.5):", "def _getBMat(self):\n\n \"\"\"B matrix is just a mass matrix, can be easily assembled\n through fenics. However, the ordering in Fenics is not the\n mesh ordering. So we build a temp matrix and then use the\n vertex to dof map to get the right ordering interms of our\n mesh nodes.\n \"\"\"\n\n # create function space of order 1. For KL, we only restrict\n # to first order spaces.\n V = FunctionSpace(self._mesh, \"CG\", 1)\n # Define basis and bilinear form\n u = TrialFunction(V)\n v = TestFunction(V)\n a = u * v * dx\n # assemble in a temp matrix\n B_temp = assemble(a)\n\n # create petsc matrix B\n B = PETSc.Mat().create()\n B.setType('aij')\n B.setSizes(self.domain.getNodes(), self.domain.getNodes())\n B.setUp()\n\n # store the value in a a temp array B_ij\n B_ij = B_temp.array()\n\n # get the vertex to dof map\n v_to_d_map = vertex_to_dof_map(V)\n\n print '---------------------------'\n print '---------------------------'\n print ' Building Mass Matrix '\n print '---------------------------'\n print '---------------------------'\n for node_i in range(0, self.domain.getNodes()):\n for node_j in range(node_i, self.domain.getNodes()):\n B_ij_nodes = B_ij[v_to_d_map[node_i], v_to_d_map[node_j]]\n if B_ij_nodes > 0:\n B.setValue(node_i, node_j, B_ij_nodes)\n B.setValue(node_j, node_i, B_ij_nodes)\n\n B.assemblyBegin()\n B.assemblyEnd()\n print '---------------------------'\n print '---------------------------'\n print ' Finished Mass Matrix '\n print '---------------------------'\n print '---------------------------'\n return B", "def create_matrix_mapping_with_neighbours(data_mh, embedding_model, train_mh_index_map):\n mh_index_map = {}\n # extract a matrix that contains only embeddings of training data\n train_vectors = embedding_model.wv.syn0[np.array(list(train_mh_index_map.keys()))]\n # dictionary: index in small matrix - real word index\n vector_index_map = dict(zip(np.arange(0, train_vectors.shape[0]), list(train_mh_index_map.keys())))\n for vector_idx in data_mh:\n # look if an instance is in train\n if vector_idx not in train_mh_index_map.keys():\n # get the vector\n word_vec = embedding_model.wv.syn0[vector_idx]\n # calculate the dot product between the current vector and vectors from train\n similarities = np.dot(train_vectors, word_vec)\n # get the one with highest similarity\n nearest = np.argmax(similarities)\n # lookup the wordindex based on the vector index of the nearest neighbour of that small matrix\n nearest_idx = vector_index_map[nearest]\n # lookup the matrix idx of that word and put it in the map\n nearest_n_matrix_idx = train_mh_index_map[nearest_idx]\n mh_index_map[vector_idx] = nearest_n_matrix_idx\n else:\n matrix_idx = train_mh_index_map[vector_idx]\n mh_index_map[vector_idx] = matrix_idx\n return mh_index_map", "def lattice_sums(self,R):\n \n factor = (mu0*mB**2)/(32*k*pi)/((10**(-10))**3)\n\n A = {}\n B = {}\n\n for jNumber in np.arange(1,9):\n\n #obtain a sphere of lattice points of radius R centred on ion position 1A\n iVector = self.position['1A'] \n vertices,_ = self.spherical_bravais_lattice(R,1,'A',jNumber,'A',lattice_multiplier=2) \n \n #calculate the relative 
position of every lattice point\n x = vertices[:,0] - iVector[0] \n y = vertices[:,1] - iVector[1]\n z = vertices[:,2] - iVector[2]\n r = np.sqrt(np.sum(np.power(vertices - iVector,2),axis=1))\n \n #calculate lattice sums\n xx = sum(((r**2-3*x**2)/r**5))\n yy = sum(((r**2-3*y**2)/r**5))\n zz = sum(((r**2-3*z**2)/r**5))\n xy = sum(((-3*x*y)/r**5))\n xz = sum(((-3*x*z)/r**5))\n yz = sum(((-3*y*z)/r**5))\n\n A[jNumber] = factor*np.array([[xx, xy, xz],[xy,yy,yz],[xz,yz,zz]])*self.g_grid\n\n for jNumber in np.arange(1,9):\n\n #obtain a sphere of lattice points of radius R centred on ion position 1B \n iVector = self.position['1A'] \n vertices,_ = self.spherical_bravais_lattice(R,1,'A',jNumber,'B',lattice_multiplier=2) \n \n #calculate the relative position of every lattice point\n x = vertices[:,0] - iVector[0] \n y = vertices[:,1] - iVector[1]\n z = vertices[:,2] - iVector[2]\n r = np.sqrt(np.sum(np.power(vertices - iVector,2),axis=1))\n \n #calculate lattice sums\n xx = sum(((r**2-3*x**2)/r**5))\n yy = sum(((r**2-3*y**2)/r**5))\n zz = sum(((r**2-3*z**2)/r**5))\n xy = sum(((-3*x*y)/r**5))\n xz = sum(((-3*x*z)/r**5))\n yz = sum(((-3*y*z)/r**5)) \n \n B[jNumber] = factor*np.array([[xx, xy, xz],[xy,yy,yz],[xz,yz,zz]])*self.g_grid\n return A, B", "def find_loc_indices(loc, dir, tile):\n #returns the indices of the nearest neighbor point in the given tile, the lon/lat of the nearest neighbor, \n #and the distance (m) from the given point to the nearest neighbor grid cell\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n temp_loc = copy.deepcopy(loc)\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n if loc[0] < 180:\n temp_loc[0] += 360\n \n #set up an array to hold the euclidean distance between the given point and every grid cell\n eucl_dist = np.zeros((longitude.shape[0],longitude.shape[1]))\n \n #get the Cartesian location of the given point\n cart_loc = np.array(sph2cart(math.radians(temp_loc[0]), math.radians(temp_loc[1]), earth_radius))\n \n for i in range(len(longitude)):\n for j in range(len(longitude[i])):\n #get the Cartesian location of all grid points\n cart_cell = np.array(sph2cart(math.radians(longitude[i,j]), math.radians(latitude[i,j]), earth_radius))\n \n #calculate the euclidean distance from the given point to the current grid cell\n eucl_dist[i,j] = np.linalg.norm(cart_loc - cart_cell)\n \n #get the indices of the grid point with the minimum euclidean distance to the given point\n i,j = np.unravel_index(eucl_dist.argmin(), eucl_dist.shape)\n \n return (i,j,longitude[i,j]%360.0, latitude[i,j], eucl_dist[i,j])", "def 
topsis(matrix, weight, norm_m, id_sol):\n z = mul_w(weight, norm(matrix, norm_m))\n s, f = zenith_nadir(z, id_sol)\n p, n = distance(z, s, f)\n final_s = array([n[i] / (p[i] + n[i])\n\t\tfor i in range(p.shape[0])])\n if pl == 'y':\n q = [i + 1 for i in range(matrix.shape[0])]\n\n return final_s", "def mapping(s, t, s_new, k,c):\n n, s_dim = s.shape\n t_dim = t.shape[1]\n n_new = s_new.shape[0]\n # 1. determine nearest neighbors\n dist = np.sum((s[np.newaxis] - s_new[:,np.newaxis])**2,-1)\n nn_ids = np.argsort(dist)[:,:k] # change to [:,:k]\n nns = np.row_stack([s[nn_ids[:,ki]] for ki in range(k)])\n nns = nns.reshape((n_new, k, s_dim), order='F')\n # 2 determine gram matris; \n dif = s_new[:,np.newaxis] - nns\n G = np.tensordot(dif,dif,axes=([2],[2]))\n G = G[np.arange(n_new),:,np.arange(n_new)]\n # 3. determine weights not worth vectorizing this \n weights = np.zeros((n_new, k))\n for i_n in range(n_new): \n weights[i_n] = np.linalg.inv(G[i_n]+c*np.eye(k)).dot(np.ones((k,)))\n weights /= np.sum(weights, -1, keepdims=True)\n # 4. compute coordinates\n t_nns = np.row_stack([t[nn_ids[:,ki]] for ki in range(k)])\n t_nns = t_nns.reshape((n_new,k, t_dim), order='F')\n t_new = np.dot(weights, t_nns)\n t_new = t_new[np.arange(n_new), np.arange(n_new)]\n return t_new", "def bosonic_cells(self):\n cells = self.cells()\n fermionic_cells = self.fermionic_cells()\n coords = [x for x in cells if x not in fermionic_cells]\n return coords", "def find_nearby_membranes(all_membranes, all_membrane_map, vert_normals):\r\n membrane_tree = scipy.spatial.cKDTree(all_membranes)\r\n nearby_membranes = np.array(list(membrane_tree.query_pairs(adhesion_max_dist, p=2)))\r\n nearby_membrane_map = defaultdict(list)\r\n if nearby_membranes.shape[0] > 0:\r\n # Exclude same-cell membrane interactions and same-direction-facing segments\r\n all_vert_normals = np.concatenate(vert_normals, axis=0)\r\n subset = np.where(\r\n (all_membrane_map[nearby_membranes[:, 0], 0] !=\r\n all_membrane_map[nearby_membranes[:, 1], 0])\r\n & (np.einsum('ij,ik->i', all_vert_normals[nearby_membranes[:, 0]], all_vert_normals[nearby_membranes[:, 1]]) < 0.0)\r\n )\r\n nearby_membranes = nearby_membranes[subset]\r\n # {cell idx: (vert idx, other cell idx, other vert idx, 'all_membranes' vert idx)}\r\n for nm in nearby_membranes:\r\n m0 = all_membrane_map[nm[0]]\r\n m1 = all_membrane_map[nm[1]]\r\n nearby_membrane_map[m0[0]].append((m0[1], m1[0], m1[1], nm[1]))\r\n nearby_membrane_map[m1[0]].append((m1[1], m0[0], m0[1], nm[0]))\r\n nearby_membrane_map = {k: np.array(v)\r\n for k, v in nearby_membrane_map.items()}\r\n# print(nearby_membrane_map)\r\n return nearby_membranes, nearby_membrane_map", "def get_LDAU(self):\n\n # let's simply use the default as a first step\n LDAU_dict, poscar_need_hack, potcar_need_hack = super(U_Strategy_MaterialsProject_V2, self).get_LDAU()\n\n Na_indices = self.structure.indices_from_symbol('Na')\n\n # hack MAGMOM\n list_oxidizable_site_indices = self.sort_TM_sites_by_Na_distance(Na_indices)\n\n MAGMOM = self.build_magmom(list_oxidizable_site_indices)\n LDAU_dict['MAGMOM'] = MAGMOM \n\n return LDAU_dict, poscar_need_hack, potcar_need_hack", "def create_jointsmap(uv_coord, size):\r\n\r\n\t# define connections and colors of the bones\r\n\t# print(coords_hw[-1]) # this is center ( the 22nd point)\r\n\tcanvas = np.zeros((size, size, 3))\r\n\tbones = [\r\n\t\t((1, 2), THUMB_COLOR1),\r\n\t\t((2, 3), THUMB_COLOR2),\r\n\t\t((3, 4), THUMB_COLOR3),\r\n\r\n\t\t((5, 6), INDEX_COLOR1),\r\n\t\t((6, 7), INDEX_COLOR2),\r\n\t\t((7, 8), 
INDEX_COLOR3),\r\n\r\n\t\t((9, 10), MIDDLE_COLOR1),\r\n\t\t((10, 11), MIDDLE_COLOR2),\r\n\t\t((11, 12), MIDDLE_COLOR3),\r\n\r\n\t\t((13, 14), RING_COLOR1),\r\n\t\t((14, 15), RING_COLOR2),\r\n\t\t((15, 16), RING_COLOR3),\r\n\r\n\t\t((17, 18), PINKY_COLOR1),\r\n\t\t((18, 19), PINKY_COLOR2),\r\n\t\t((19, 20), PINKY_COLOR3)]\r\n\tpalm = []\r\n\tfor connection, _ in [((0, 1), []),\r\n\t\t\t\t\t\t ((1, 5), []),\r\n\t\t\t\t\t\t ((5, 9), []),\r\n\t\t\t\t\t\t ((9, 13), []),\r\n\t\t\t\t\t\t ((13, 17), []),\r\n\t\t\t\t\t\t ((17, 0), []), ]:\r\n\t\tcoord1 = uv_coord[connection[0]]\r\n\t\tpalm.append([int(coord1[0]), int(coord1[1])])\r\n\t# palm.append([int((coord1[0]-.5)* W_scale+ W_offset ), int(-(coord1[1]- .5)* H_scale+ H_offset)])\r\n\t# print(palm)\r\n\tcv2.fillConvexPoly(canvas, np.array([palm], dtype=np.int32), PALM_COLOR)\r\n\tfor connection, color in bones:\r\n\t\tcoord1 = uv_coord[connection[0]]\r\n\t\tcoord2 = uv_coord[connection[1]]\r\n\t\tcoords = np.stack([coord1, coord2])\r\n\t\t# 0.5, 0.5 is the center\r\n\t\tx = coords[:, 0]\r\n\t\ty = coords[:, 1]\r\n\t\tmX = x.mean()\r\n\t\tmY = y.mean()\r\n\t\tlength = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5\r\n\t\tangle = np.math.degrees(np.math.atan2(y[0] - y[1], x[0] - x[1]))\r\n\t\tpolygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 2), 16), int(angle), 0, 360, 1)\r\n\t\tcv2.fillConvexPoly(canvas, polygon, color)\r\n\treturn canvas", "def alignMonoPlane(entry,prec=1E-4,seed_index=0,supercell=2,\n c_mag=50,dist_from_plane=3):\n\n\n # Keep original copy of structure\n s = copy.deepcopy(entry[0])\n\n\n new_latt,fit_fracs= getNewLattice(entry,dim=2,prec=prec,seed_index=seed_index,\n supercell=supercell,c_mag=c_mag)\n\n\n \n\n # Identify plane to translate atoms towards\n\n plane = Plane(Point3D(s.sites[seed_index].coords),\n normal_vector=new_latt[2])\n \n # Create list of translationss\n trans = list(itertools.product([1,-1,0],repeat=3))\n\n lat = np.array(s.lattice.as_dict()['matrix'])\n final_sites = []\n i=0\n \n # Ensure that the atoms are nearby each other\n for site in [x.coords for x in s.sites]:\n point = Point3D(site)\n if 1==1:\n\n news = []\n \n # translate atomic sites to see which position is closest to plane\n for t in trans:\n point = Point3D(site+np.dot(np.transpose(lat),t))\n news.append([float(plane.distance(point)),t])\n news.sort(key = lambda x:x[0])\n for new in news:\n if not np.any([magni((site+np.dot(np.transpose(lat),new[1]))-x)<=prec for x in final_sites]):\n final_sites.append(site+\n np.dot(np.transpose(lat),new[1]))\n break\n i+=1\n \n # Create new lattice matricies\n lat1 = np.array([new_latt[0],new_latt[1],new_latt[2]])\n lat2 = np.array([new_latt[1],new_latt[0],new_latt[2]])\n\n # Generate atomic fractions\n new_fracs1 = np.linalg.solve(lat1.T,np.array(final_sites).T).T\n new_fracs2 = np.linalg.solve(lat2.T,np.array(final_sites).T).T\n\n species=fit_fracs[1]\n\n return([species,new_fracs1,lat1],[species,new_fracs2,lat2])", "def test_20_supergeom_simple(self):\n for proj in ['TAN', 'CEA']:\n ra0, dec0 = CRVAL\n res = 0.01 * DEG\n wcs = coords.get_wcs_kernel(proj, ra0, dec0, res)\n\n wcs.wcs.crpix = (60, 70)\n map0 = enmap.zeros((100,200), wcs=wcs)\n map0[2, 3] = 10.\n map0[90, 192] = 11.\n\n # Extracts.\n m1 = map0[:10,:10]\n m2 = map0[-10:,-10:]\n \n # Reconstruct.\n sg = coords.get_supergeom((m1.shape, m1.wcs), (m2.shape, m2.wcs))\n mapx = enmap.zeros(*sg)\n mapx.insert(m1)\n mapx.insert(m2)\n self.assertTupleEqual(map0.shape, mapx.shape)\n self.assertTrue(np.all(mapx==map0))", "def 
similarity_matrix(points, sigma):\n distances_squared = spherical_distances(points, points)**2\n\n \n return np.exp( -distances_squared / (2.0 * sigma) )", "def get_grid_index(init_grid_size, map_size, device):\n H_init, W_init = init_grid_size\n H, W = map_size\n idx = torch.arange(H * W, device=device).reshape(1, 1, H, W)\n idx = F.interpolate(idx.float(), [H_init, W_init], mode='nearest').long()\n return idx.flatten()", "def calc_dmi(da):\n boxW = [-10.0,10.0,50.0,70.0]\n boxE = [-10.0,0.0,90.0,110.0]\n \n da_W = da.sel(lat=slice(10, -10), lon=slice(50, 70)).mean(['lat', 'lon'])\n da_E = da.sel(lat=slice(0, -10), lon=slice(90, 110)).mean(['lat', 'lon'])\n \n return (da_W - da_E)", "def makeindmapKDE(self,indmap,s, background):\n import ipyml\n from ipyml.probability import pfunc\n sp = background.shape\n res = np.zeros((sp[0], sp[1]),dtype=np.float32)\n wr,wc = indmap.shape[0], indmap.shape[1]\n filter_size = 30\n stride = 12\n cov = np.asarray([[(2.0/filter_size)**2,0],[0,(2.0/filter_size)**2]])\n if 'g' in self.temp_data:\n g = self.temp_data['g']\n else:\n g = pfunc.Gaussian2D((sp[0],sp[1]),cov=cov,invcov=False)\n self.temp_data['g'] = g\n center_r = sp[0]\n center_c = sp[1]\n g = g/g.max()\n for r in range(wr):\n for c in range(wc):\n # calcuate the center of detection window\n rr = (r * stride + r * stride + filter_size-1)/2\n cc = (c * stride + c * stride + filter_size-1)/2\n offset_r = center_r - rr\n offset_c = center_c - cc\n res = res + g[offset_r:offset_r+sp[0],offset_c:offset_c+sp[1]] * indmap[r,c]\n idx = np.argmax(res)\n res = np.tile(res.reshape((res.shape[0],res.shape[1],1)),[1,1,3])\n mr = idx / sp[1]\n mc = idx - mr * sp[1]\n hf = filter_size/2\n box = np.asarray([mc -hf,mr -hf,mc + hf, mr + hf])\n return res/3, box", "def __init__(self, som):\n # The owner of this class.\n self.som = som\n\n # What is the worst BMU distance so far, this becomes the error for the\n # entire SOM.\n self.worst_distance = 0", "def test_blat_nt_database_mapper(self):\r\n blat_nt_database_mapper(query_fp=self.inseqs2_fp,\r\n refseqs_fp=self.refseqs2_fp,\r\n output_dir=self.test_out,\r\n evalue=1e-10,\r\n min_id=0.75,\r\n HALT_EXEC=False)\r\n observation_map_fp = join(self.test_out, 'observation_map.txt')\r\n self.assertTrue(exists(observation_map_fp))\r\n observation_table_fp = join(self.test_out, 'observation_table.biom')\r\n table = parse_biom_table(open(observation_table_fp, 'U'))\r\n self.assertItemsEqual(table.SampleIds, ['s2', 's1'])\r\n self.assertItemsEqual(\r\n table.ObservationIds,\r\n ['r1',\r\n 'r2',\r\n 'r3',\r\n 'r4',\r\n 'r5'])\r\n self.assertEqual(table.sum(), 6)", "def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True):\n return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths)" ]
[ "0.57011366", "0.5595913", "0.5572306", "0.5561625", "0.5546656", "0.5420969", "0.5410991", "0.53931206", "0.5357705", "0.5275946", "0.5256405", "0.5252467", "0.52403647", "0.52389497", "0.52355295", "0.5224548", "0.5204496", "0.5180344", "0.51797044", "0.51730376", "0.51412076", "0.5068669", "0.5053913", "0.4998826", "0.49872217", "0.49779648", "0.497275", "0.49656913", "0.49619395", "0.49524575", "0.49524575", "0.4947485", "0.49410293", "0.4938562", "0.4934264", "0.4931503", "0.49308962", "0.49296334", "0.4916642", "0.49155834", "0.49026316", "0.487804", "0.48732492", "0.4859131", "0.48462254", "0.48452544", "0.48382348", "0.48366308", "0.48172924", "0.48144817", "0.4807037", "0.48047432", "0.4803532", "0.4791145", "0.47804174", "0.47660777", "0.47618875", "0.47616255", "0.47595966", "0.47592652", "0.4757366", "0.4745915", "0.47346774", "0.47289813", "0.47220695", "0.47214875", "0.47206938", "0.47125122", "0.47080112", "0.47048926", "0.470484", "0.470442", "0.46949872", "0.46914765", "0.46870333", "0.46844295", "0.46837434", "0.46823192", "0.46820185", "0.46801153", "0.46799207", "0.46788022", "0.4676106", "0.46733364", "0.46708533", "0.46686637", "0.4668384", "0.4666777", "0.466664", "0.46629152", "0.46620935", "0.46613306", "0.46575925", "0.46528295", "0.46522516", "0.46513578", "0.46435803", "0.46435425", "0.4640974", "0.463209" ]
0.752086
0
Confirm if the module is ready to use.
def confirm_installation(cls): # here, you should write any code needed to confirm that all the # dependencies required by your module are installed. # this class method will be executed when HADDOCK3 starts. # if you module does not import any run-time dependency, just leave # this method blank return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_ready() -> bool:\n return True", "def is_ready(self) -> bool:\n pass", "def if_ready(self, **kwargs):\n return True", "def ready(self):\n return True", "def is_ready(cls):\n\n return False", "def do_ready(self) -> bool:\n logger.info('Device ' + self.name + ' is ready.')\n return False", "def is_ready(self, shutit_module_obj):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif shutit_module_obj.module_id in self.get_current_shutit_pexpect_session_environment().modules_ready:\n\t\t\tself.log('is_ready: returning True from cache',level=logging.DEBUG)\n\t\t\treturn True\n\t\tready = shutit_module_obj.check_ready(self)\n\t\tif ready:\n\t\t\tself.get_current_shutit_pexpect_session_environment().modules_ready.append(shutit_module_obj.module_id)\n\t\t\treturn True\n\t\treturn False", "def check_ready(self, throw_error=True):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tself.log('PHASE: check_ready', level=logging.DEBUG)\n\t\terrs = []\n\t\tself.pause_point('\\nNow checking whether we are ready to build modules configured to be built', print_input=False, level=3)\n\t\t# Find out who we are to see whether we need to log in and out or not.\n\t\tfor module_id in self.module_ids():\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tself.log('considering check_ready (is it ready to be built?): ' + module_id, level=logging.DEBUG)\n\t\t\tif cfg[module_id]['shutit.core.module.build'] and module.module_id not in self.get_current_shutit_pexpect_session_environment().modules_ready and not self.is_installed(module):\n\t\t\t\tself.log('checking whether module is ready to build: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\t# Move to the correct directory (eg for checking for the existence of files needed for build)\n\t\t\t\trevert_dir = os.getcwd()\n\t\t\t\tself.get_current_shutit_pexpect_session_environment().module_root_dir = os.path.dirname(self.shutit_file_map[module_id])\n\t\t\t\tself.chdir(self.get_current_shutit_pexpect_session_environment().module_root_dir)\n\t\t\t\tif not self.is_ready(module) and throw_error:\n\t\t\t\t\terrs.append((module_id + ' not ready to install.\\nRead the check_ready function in the module,\\nor log messages above to determine the issue.\\n\\n', self.get_shutit_pexpect_session_from_id('target_child')))\n\t\t\t\tself.logout(echo=False)\n\t\t\t\tself.chdir(revert_dir)\n\t\treturn errs", "def allready(antReady) :\n return numNotready(antReady) == 0", "def ready(cls):\n pass", "def ready(self):\n pass", "def check_ready(self):\r\n print \"Checking ready\"\r\n\t\tif self.game.trough.is_full():\r\n print \"Ready\"\r\n\t\t\tself.ready()\r\n\t\t\treturn True\r\n\t\tprint \"Not Ready\"\r\n\t\treturn False", "def isReady(self):\n\t\twhile self.osc.trigger_state() != \"save\":\n\t\t\ttime.sleep(.1)\n\t\treturn True", "def is_readytoserve(self):\n isreadytoserve = True\n if (not self.comp('packmanager').is_readytoserve()):\n isreadytoserve = False\n return isreadytoserve", "def check_apps_ready(self):\n if not self.apps_ready:\n raise RuntimeError(\"Apps aren't loaded yet.\")", "def _ready(cls):\n sync_call(cls.ready)", "def is_available():", "def check_connect_ready(self):\r\n need_no_account = not self.client.secret.know_secret()\r\n need_no_depth = not self.config.get_bool(\"gox\", \"load_fulldepth\")\r\n need_no_history = not self.config.get_bool(\"gox\", \"load_history\")\r\n need_no_depth = need_no_depth or 
FORCE_NO_FULLDEPTH\r\n need_no_history = need_no_history or FORCE_NO_HISTORY\r\n ready_account = \\\r\n self.ready_idkey and self.ready_info and self.orderbook.ready_owns\r\n if ready_account or need_no_account:\r\n if self.orderbook.ready_depth or need_no_depth:\r\n if self.history.ready_history or need_no_history:\r\n if self._was_disconnected:\r\n self.signal_ready(self, None)\r\n self._was_disconnected = False", "def is_ready(self):\n return self.__is_ready", "def ready(self):\n return self.settings is not None", "def verify_package_status(self):\n pass", "def verify_package_status(self):\n pass", "def ready(self):\n if self.proc.stdout.readline() != \"OK\\n\":\n raise ValueError(\"Le bot {bot} n'arrive pas à se préparer\".format(bot=self.name))", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn True", "def is_ready(self):\n return self._is_ready", "def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()", "def is_ready(self):\n return self.prep_job.is_done()", "def is_ready(self):\n return self._is_ready()", "def available(self):\n return True", "def available(self):\n return True", "def is_ready(self) -> bool:\n return self.build_progress == 1.0", "def is_ready(self, addr: int, /) -> bool:", "def check_availability(self):\n pass", "def available(self) -> bool:\n return True", "def available(self) -> bool:\n return True", "def isReady(self):\n return self._lowLevelIsReady()", "def waitonready(self):\n debug('ControllerStartup.waitonready()')\n waitonready(self.pidevice, **self._kwargs)", "def is_setup(self):\n return self._market_data_sock_info.ready.is_set() and \\\n self._orders_sock_info.ready.is_set()", "def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def ready(self):\n return self.snippets is not None", "async def is_model_ready(\n self,\n model_name: str,\n model_version: str = ...,\n headers: dict[str, t.Any] = ...,\n ) -> bool:", "def is_ready(self):\n return len(self.unresolved_placeholders) == 0", "def ready(self):\n import exams.signals # pylint: disable=unused-import", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def is_ready(self) -> bool:\n return self._ready.is_set()", "def is_ready(self):\n for dependency in self.dependencies:\n if not dependency.is_finished:\n return False\n # If all dependencies are finished we're ready.\n return True", "def _IsReady(self):\n return self._GetPod()['status']['phase'] != 'Pending'", "def _check_ready(self, _widget, __event=None, __page=0):\r\n\r\n if self.cmbHardware.get_active() > 0:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n\r\n return False", "def IsReady(self):\r\n\t\treturn self._get_attribute('isReady')", "def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))", "def ready(self):\n self.stdout.write('READY\\n')\n self.stdout.flush()", "def ready(self):\n if self._wait_auth:\n return False\n return True", "def ready(self):\n\n from . 
import signals # noqa", "def is_ready_update(self):\n raise UnityTrainerException(\"The is_ready_update method was not implemented.\")", "def is_ready(cls):\n\n return SUB_NOTIFY_READY", "def is_ready(self):\n if self.id is None:\n return False\n\n return True", "def ready(self):\n return self.counter > 0", "def is_ready(self):\n if self.producer is None:\n return False\n return True", "def check_models_ready(self):\n if not self.models_ready:\n raise RuntimeError(\"Models aren't loaded yet.\")", "def ready(self):\n logger.info('game.ready')\n import game.signals", "def requestReady(self):\n if self.team[self.team_num][self.map_pos].avatarLabel['text'] == \"\":\n return;\n \n if self.isHost:\n obj = {\"worldName\":self.worldInfo.worldName}\n main.cManager.sendRequest(Constants.CMSG_START_TO_READY_GAME, obj)\n \n else:\n obj ={\"worldName\": self.worldInfo.worldName}\n main.cManager.sendRequest(Constants.CMSG_READY, obj)\n self.isReady = 1", "def window_ready(self):\n raise NotImplementedError", "def ServerIsReady( self ):\n return self.ServerIsHealthy()", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def is_ready(self):\n ready = True\n for browser in self.browsers:\n if 'exe' in self.browsers[browser]:\n exe = self.browsers[browser]['exe']\n if not os.path.isfile(exe):\n logging.critical(\"Browser executable is missing for %s: '%s'\", browser, exe)\n ready = False\n return ready", "def ready(self):\n random.seed()\n return True", "def this_needs_work_test_ensure_our_presence(self):\n self.do_test_ensure_our_presence()", "def _IsReady(self):\n if self.ip_address is None:\n self._GetIpAddress()\n if self.ip_address is not None:\n url = 'http://%s' % (self.ip_address)\n r = requests.get(url)\n if r.status_code == 200:\n return True\n return False", "def setup_complete():\n\n async def predicate(ctx:vbu.Context):\n if await fetch_guild_settings(ctx):\n return True\n raise CheckFailure(f'Your server hasn\\'t yet been set up. Use {ctx.prefix}setup')\n return commands.check(predicate)", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def runRequirements(self):\n ready = (self.user[\"Save\"] != \"\" and self.user[\"Video\"] != \"\") or self.img_exist\n return ready", "def available(self):\r\n\r\n self._available = True\r\n self.owner.trigger(\"on_available\")", "def loadSuccessful(self):\r\n\r\n return (self.config != None)", "def wait_for_server_ready(self, params):\n hub_client = QTask()\n hub_client.createBlindTask('ubqc', params)\n ret = hub_client.waitBlindTask(1)\n if ret is None:\n return False\n\n path, secret = ret\n self.entry = f\"{blindCompAddr}/{path}\"\n self.secret = secret\n\n return True", "def external_input_ready(self):\n return True", "def is_available(self) -> bool:\n raise NotImplementedError", "def ready(self):\n import checkout.signals # noqa: F401", "def is_setup(self):\n if self.pocs is None:\n print_warning('POCS has not been setup. 
Please run `setup_pocs`')\n return False\n return True", "def is_ready(self):\n if self.game.has_started():\n return True\n return self.status == self.PLAYER_READY", "def test_installed(self):\n self.assertTrue(self.qi.isProductInstalled(PROJECTNAME))", "def ready_bool(ready):\r\n if ready.lower() == 'y':\r\n return True\r\n else:\r\n return False", "def ready(self):\n import main.signals # noqa", "def _is_ready(self):\n current_wait_time = 0\n start_time = time.time()\n while current_wait_time < self.max_wait_time_ready:\n try:\n response = requests.get(os.path.join(self.url, \"ready\"))\n if response.status_code == 200:\n break\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n time.sleep(1)\n current_wait_time = time.time() - start_time\n if current_wait_time >= self.max_wait_time_ready:\n raise TimeoutError(\"Interrupting execution\\n'/ready' endpoint is not ready \" +\n \"for maximum allowed {:d} seconds!\".format(self.max_wait_time_ready))", "def trigger_ready(self) -> None:\n self.trigger_signal(\"ready\")", "async def is_server_ready(self, headers: dict[str, t.Any] = ...) -> bool:", "def sox_check_is_available(self):\n result = self._process_command('sox -h', PIPE, supress_dry_run=True)\n return result[0] == 0", "def connection_ready(self) -> bool:\n return self._connection_ready", "def is_available(self):\n try :\n p = subprocess.Popen([self.program_path, self.help_argument],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n return p.wait() == self.help_return_code\n except OSError:\n return False", "def test_pm_Completeness(self):\n pass", "def test_installed(self):\n # OSA script should have been installed in setUp function\n self.assertTrue(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))\n # Clean up install\n self.run_function(\"assistive.remove\", [OSA_SCRIPT])\n # Installed should now return False\n self.assertFalse(self.run_function(\"assistive.installed\", [OSA_SCRIPT]))", "def is_available(self):\n raise NotImplementedError", "async def wait_until_ready(self):\n await self._ready.wait()", "def _is_ready(self):\n current_wait_time = 0\n start_time = time.time()\n while current_wait_time < self.max_wait_time_ready:\n try:\n response = requests.get(os.path.join(self.url, \"ready\"), timeout=1)\n if response.status_code == 200:\n break\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n current_wait_time = time.time() - start_time\n if current_wait_time >= self.max_wait_time_ready:\n raise TimeoutError(\"Interrupting execution\\n'/ready' endpoint is not ready \" +\n \"for maximum allowed {:d} seconds!\".format(self.max_wait_time_ready))", "def has_setup(self):\n\n return hasattr(self._module, 'setup')", "def is_on(self):\n return not self.ready" ]
[ "0.74207395", "0.7248439", "0.7105069", "0.70115364", "0.6927248", "0.68734705", "0.6796418", "0.6686295", "0.65093833", "0.64878744", "0.6479587", "0.6478549", "0.6433703", "0.63425285", "0.63339937", "0.63253385", "0.6316342", "0.62913597", "0.6279354", "0.62712103", "0.6251663", "0.6251663", "0.6194554", "0.61881363", "0.61881363", "0.61881363", "0.6185451", "0.61790454", "0.61761487", "0.61713624", "0.6162882", "0.6162882", "0.6151697", "0.6143967", "0.6125462", "0.61050683", "0.61050683", "0.6092645", "0.60841477", "0.6060497", "0.6029104", "0.6026294", "0.6020377", "0.6010231", "0.60075444", "0.5990789", "0.5987106", "0.5987106", "0.5987106", "0.5984391", "0.59840643", "0.5981792", "0.5977546", "0.5956763", "0.59566826", "0.59371376", "0.5925004", "0.59181726", "0.5914072", "0.58978605", "0.58910054", "0.5877221", "0.5876157", "0.5858548", "0.5851107", "0.5844293", "0.5835148", "0.58288205", "0.58259195", "0.58180124", "0.5817775", "0.58039296", "0.5800938", "0.5792022", "0.57647604", "0.5758969", "0.5756495", "0.5754703", "0.5753668", "0.57488906", "0.5740309", "0.5738475", "0.57229877", "0.5721595", "0.5721529", "0.5719209", "0.5718539", "0.5718292", "0.57113576", "0.5704509", "0.5703985", "0.5703293", "0.57012236", "0.5698415", "0.56924254", "0.56888837", "0.56861025", "0.5677291", "0.5669954", "0.5669062" ]
0.6735416
7
helper function to get the next occurring monday as a date object
def _get_next_monday(self):
    today = datetime.date.today()
    weekday_int = today.weekday()
    if weekday_int == 0:
        return today
    next_mon = today + timedelta(7 - weekday_int)
    return next_mon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current", "def get_next_weekday(date, weekday):\n return date + dt.timedelta(days=(weekday - date.weekday() + 7) % 7)", "def get_next_weekend():\n d = datetime.date.today()\n # day 5 for saturday\n t = datetime.timedelta((7 + 5 - d.weekday()) % 7)\n return (d + t).strftime('%d-%m-%Y')", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def next_day(date):\n return date + datetime.timedelta(days=1)", "def get_next_day(self):\n pass", "def _to_next_ceiling_busi_day(date):\n try:\n date = parse(date)\n except TypeError:\n date = date\n\n date = date + relativedelta(months=+1)\n date = DateUtils._to_ceiling_busi_day(date)\n\n return date", "def next_weekday(date, weekday):\n delta = weekday - date.weekday()\n if delta < 0:\n delta += 7\n return date + timedelta(days=int(delta))", "def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def next_sunday(day):\n if day.weekday() == 6: # sunday\n return day + timedelta(days=7)\n else:\n return day + timedelta(days=(6 - day.weekday()))", "def get_next_closest_day(weekday):\n names = {\n 'monday': 0,\n 'tuesday': 1,\n 'wednesday': 2,\n 'thursday': 3,\n 'friday': 4,\n 'saturday': 5,\n 'sunday': 6\n }\n\n today = get_current_india_time().date()\n day_shift = (names[weekday] - today.weekday()) % 7\n next_day = datetime.datetime.combine(\n today + datetime.timedelta(days=day_shift), datetime.time.min)\n\n if next_day.weekday() == today.weekday():\n next_day = next_day + datetime.timedelta(days=7)\n return next_day", "def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])", "def next_seven_day(self):\n today = datetime.date.today()\n 
week_next = today + datetime.timedelta(days=7)\n return week_next.strftime('%Y-%m-%d')", "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "def get_next_midnight():\n return pytz.utc.localize(datetime.datetime.today()).replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=1)", "def get_next_midnight():\n return pytz.utc.localize(datetime.datetime.today()).replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=1)", "def getPinnedDayOfNextMonth(year, month, day):\n\tyear = year + (month / 12) # purposeful integer division\n\tmonth = (month % 12) + 1\n\tday = pinDayToMonth(year, month, day)\n\treturn datetime.date(year, month, day)", "def next_week_start(iso_date: Optional[str] = None) -> date:\n if iso_date:\n current_date = date.fromisoformat(iso_date)\n else:\n current_date = date.today()\n\n days_until_monday = 7 - current_date.weekday()\n\n candidate_start = current_date + timedelta(days=days_until_monday)\n while candidate_start in holidays.US():\n candidate_start += timedelta(days=1)\n\n return candidate_start", "def next_weekday(weekday, d=datetime.datetime.now()):\n if weekday.lower() not in day_values:\n return None\n days_ahead = day_values[weekday.lower()] - d.weekday()\n if days_ahead <= 0: # Target day already happened this week\n days_ahead += 7\n return d + datetime.timedelta(days_ahead)", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def next_date(date_time_input, interval):\n\n if interval=='day':\n return date_time_input+timedelta(days=1)\n\n elif interval=='week':\n return date_time_input+timedelta(days=7)\n\n elif interval=='month':\n \n a = date_time_input+timedelta(days=31)\n next_month = a.replace(day=1)\n return next_month", "def next_month(dateobj):\n year_delta, old_month = divmod(dateobj.month, 12)\n return datetime.date(dateobj.year + year_delta, old_month + 1, 1)", "def next_day(year, month, day):\n thisday = dt.datetime(year, month, day)\n nextday = thisday + dt.timedelta(days=1)\n y = nextday.year\n m = nextday.month\n d = nextday.day\n return y, m, d", "def _next_week(self) -> datetime.datetime:\n now = datetime.datetime.now()\n for i in range(7):\n yield now + datetime.timedelta(i)", "def tomorrow(self):\n if self.isLeapYear():\n fdays = 29\n else:\n fdays = 28\n\n DIM = [0, 31, fdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n currentDay = self.day\n maxDay = DIM[self.month]\n\n if currentDay == maxDay and self.month == 12:\n self.year += 1\n self.month = 1\n self.day = 1\n elif currentDay == maxDay:\n self.month += 1\n self.day = 1\n else:\n self.day += 1", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def nth_dow_to_day(tupel, y):\r\n m = tupel[0]\r\n dow = tupel[1]\r\n n = tupel[2]\r\n\r\n if dow == 7:\r\n dow = 0\r\n\r\n first_dow = date_to_dow(y, m, 1) # the dow of the first of the month\r\n shift = dow - first_dow\r\n if shift < 0:\r\n shift += 7\r\n\r\n return shift + (7 * n) - 6", "def nextDay(year, month, day):\n if day < 30:\n day += 1\n else:\n if month < 12:\n month += 1\n day = 1\n else:\n year += 1\n month = 1\n day = 1\n \n return(year, month, day)", "def next_date(date):\n #For this function, I just created as many if else statements as I could to cover every situation I could think of.\n #Most of these if else statements are distinct edge cases where I add 1 in a different spot each time.\n if date[0] == 1 or date[0] == 3 or 
date[0] == 5 or date[0] == 7 or date[0] == 8 or date[0] == 10:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 12:\n if date[1] < 31:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 31:\n nextday = (1, 1, date[2] + 1)\n return nextday\n elif date[0] == 4 or date[0] == 6 or date[0] == 9 or date[0] == 11:\n if date[1] < 30:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 30:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[0] == 2:\n if date[2] % 4 == 0 or date[2] % 1000 == 0:\n if date[1] < 29:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 29:\n nextday = (date[0] + 1, 1, date[2])\n return nextday\n elif date[1] < 28:\n nextday = (date[0], date[1] + 1, date[2])\n return nextday\n elif date[1] == 28:\n nextday = (date[0] + 1, 1, date[2])\n return nextday", "def nextDay(year1, month1, day1):\n if day1 < 30:\n day1 += 1\n else:\n if month1 < 12:\n month1 += 1\n day1 = 1\n else:\n year1 += 1\n month1 = 1\n day1 = 1\n \n return(year1, month1, day1)", "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "def MayDay(year):\n\n day = datetime.date(year, 5, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 1:\n return day\n day += datetime.timedelta(days=1)", "def next_regular_trash_day(date: str) -> str:\n parsed_date = parser.parse(date)\n day_of_week = parsed_date.weekday()\n\n if day_of_week < TRASH_DAY:\n delta = TRASH_DAY - day_of_week\n elif day_of_week == TRASH_DAY:\n delta = 0\n else:\n delta = 7 - (day_of_week - TRASH_DAY)\n\n next_trash_date = parsed_date + datetime.timedelta(days=delta)\n return next_trash_date.strftime('%Y-%m-%d')", "def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def get_mothers_day_date(year):\r\n start_date = parse(f\"Jan {year}\").date()\r\n for date in rrule(YEARLY, dtstart=start_date, bymonth=5, byweekday=SU, bysetpos=2):\r\n if date.year == year:\r\n return date.date()", "def next_month(date):\n\n return date + datetime.timedelta(days=calendar.monthrange(date.year, date.month)[1])", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def next_day(isotext):\n as_arrow = arrow.get(isotext)\n return as_arrow.replace(days=+1).isoformat()", "def dow_1(self):\n return self._dayoffset + 1", "def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string", "def date_to_dow(y, m, d):\r\n # Python uses 
Monday week start, so wrap around\r\n w = calendar.weekday(y, m, d) + 1\r\n if w == 7:\r\n w = 0\r\n return w", "def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')", "def monday_last_week():\n today = datetime.date.today()\n last_week = today - datetime.timedelta(days=7)\n return last_week - datetime.timedelta(days=(last_week.isoweekday() - 1))", "def doomsday(y):", "def get_tomorrow(x: Optional[Date] = None) -> Date:\n return (x or get_today()) + TimeDelta(days=1)", "def convert_week_number_to_date(week_number, first_monday, weekday=0):\n assert(1 <= week_number <= 52)\n assert(0 <= weekday <= 6)\n first_gehol_year_day = datetime.strptime(first_monday, \"%d/%m/%Y\")\n num_days = (week_number-1) * 7 + weekday\n dt = timedelta(days = num_days)\n return first_gehol_year_day + dt", "def test_first_date_static_1(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 30))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def get_next_week(self, startdate):\n dow_today = int(datetime.datetime.strftime(startdate, '%w'))\n days_until_sunday = 7 - ((dow_today + 7) % 7)\n #days_until_sunday = 7 - (dow_today + 1)\n sunday = startdate + datetime.timedelta(days=days_until_sunday)\n following_saturday = sunday + datetime.timedelta(days=6)\n next_week = (sunday, following_saturday)\n return next_week", "def get_next_trading_day_schedule(reference_day: dt):\n reference_day = reference_day.date()\n schedule = get_trading_calendar(reference_day, reference_day)\n while schedule.empty:\n reference_day += timedelta(days=1)\n schedule = get_trading_calendar(reference_day, reference_day)\n return schedule", "def test_first_date_static_2(self):\n input_ = (datetime.date(2006, 3, 1), datetime.date(2006, 3, 31))\n expected = (datetime.date(2006, 3, 1), datetime.date(2006, 4, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def test_monday(self):\n date = datetime.date(1981, 5, 4)\n self.assertEqual(date.isoweekday(), 1)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def nextDate(y, m, d):\n dateTuple = (y, m, d, 0, 0, 0, 0, 0, 0)\n epochSecs = mktime(dateTuple)\n nextDateTuple = localtime(epochSecs+24*60*60)\n return nextDateTuple[:3]", "def test_next_workday_at_10():\n saturday_at_8am = datetime(2017, 4, 1, 8, tzinfo=timezone.utc)\n assert next_workday_at_10(saturday_at_8am) == datetime(2017, 4, 3, 10, tzinfo=timezone.utc)\n tuesday_at_4am = datetime(2017, 4, 4, 4, tzinfo=timezone.utc)\n assert next_workday_at_10(tuesday_at_4am) == datetime(2017, 4, 5, 10, tzinfo=timezone.utc)\n wednesday_at_3pm = datetime(2017, 4, 5, 15, tzinfo=timezone.utc)\n assert next_workday_at_10(wednesday_at_3pm) == datetime(2017, 4, 6, 10, tzinfo=timezone.utc)", "def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)", "def calculate_next_payment(frequency, 
payment_date, payment_month):\r\n\tif frequency == 1 or frequency == 4: # weekly or four-weekly\r\n\t\tnext_payment = payment_date + timedelta(weeks=frequency)\r\n\telif frequency == 2: # monthly\r\n\t\tnext_payment = payment_date.replace(month=payment_month + 1)\r\n\telse:\r\n\t\tnext_payment = date(1, 1, 1)\r\n\r\n\tprint(\"Frequency : \" + str(frequency)) # testing\r\n\tprint(\"Payment date: \" + str(payment_date)) # testing\r\n\tprint(\"Next payment: \" + str(next_payment)) # testing\r\n\r\n\treturn next_payment", "def day_of_the_week(arg):", "def get_last_monday():\n today = get_current_india_time()\n # Weekday for Monday is 0.\n if today.date().weekday() == 0:\n ts = today.replace(hour=0, minute=0, second=0, microsecond=0)\n else:\n next_monday = get_next_closest_day(\"monday\")\n week_delta = get_time_delta(7)\n ts = next_monday - week_delta\n return ts", "def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day", "def weekly():", "def weekday(day):\n return (day % 7) - 1", "def get_date_of_weekday(weekday, week_number=-1, week_delta=0):\n days_to_add = days_to_add_to_monday(weekday)\n if week_number > -1:\n monday = get_monday_date_from_week_number(week_number)\n # result = monday + timedelta(days=days_to_add) + timedelta(weeks=week_delta)\n else:\n monday = get_monday_date(week_delta)\n # result = get_date(monday, weekday)\n result = monday + timedelta(days=days_to_add) + timedelta(weeks=week_delta)\n return result", "def morning_twilight_12(self, date=None):\n self.site.horizon = self.horizon12\n self._set_site_date(date)\n r_date = self.site.next_rising(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date", "def get_day(today: Day, weekday_number: int) -> Day:\n assert type(today) is Day\n assert type(weekday_number) is int\n\n today = today.to_date_time()\n date_list = list(rrule(DAILY, count=1, wkst=MO, byweekday=weekday_number, dtstart=today))\n if date_list:\n return Day(date_list[0])", "def _get_earliest_monday(self):\n try:\n earliest_request_date = self.org_admin.get_hours_requested().\\\n order_by('usertimelog__datetime_start').first().\\\n usertimelog.datetime_start\n except:\n earliest_request_date = self.org_admin.get_hours_approved().\\\n order_by('usertimelog__datetime_start').first().\\\n usertimelog.datetime_start\n\n earliest_monday = earliest_request_date - timedelta(\n days=(earliest_request_date.weekday()))\n earliest_monday = earliest_monday.replace(hour=00, minute=00, second=00)\n\n return earliest_monday", "def nextDay(year, month, day):\n if month==12 and day==30: #address change of year\n next_year,next_month,next_day=year+1,1,1\n elif month!=12 and day==30: #address change of month within a year\n next_year,next_month,next_day=year,month+1,1\n else: #address change of day within a month\n next_year,next_month,next_day=year,month,day+1\n \n return next_year,next_month,next_day", "def test_simple_case(self):\n input_ = (datetime.date(1991, 2, 26), datetime.date(1991, 3, 26))\n\n expected = (datetime.date(1991, 2, 27), datetime.date(1991, 3, 27))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def _next_trading_day(self, day):\n next_day = self._trading_days.shift(-1)[day]\n return next_day if not pd.isnull(next_day) else None", "def day(sym, date):\n return get(sym, date, date)[0][1]", "def getnextrunningdate(jsondata):\n\n returneddata = json.loads(jsondata)\n days = {}\n\n if 
returneddata[\"response_code\"]==200:\n trainData = returneddata[\"train\"]\n daysData = trainData[\"days\"]\n if daysData:\n for day in trainData[\"days\"]:\n days[day[\"day-code\"]]=day[\"runs\"]\n\n today = datetime.date.today()\n nextweekday = (today + datetime.timedelta(days=7))\n\n for i in range(len(days)):\n runningdate = (nextweekday + datetime.timedelta(days=i))\n if models.istrainrunningonjourneydate(days, runningdate):\n return runningdate\n\n return nextweekday", "def get_date_in_two_weeks():\n today = datetime.datetime.today()\n date_in_two_weeks = today + datetime.timedelta(days=14)\n return date_in_two_weeks.date()", "def _to_floor_busi_day(date):\n try:\n date = parse(date)\n except TypeError:\n date = date\n\n date = date + relativedelta(months=-1)\n date = date.replace(day=1)\n floor_busi_day = pd.date_range(date, periods=1, freq='BM').strftime(\"%Y-%m-%d\")[0]\n\n return floor_busi_day", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def first_of_next_month(ref_date):\n year, month = add_months(ref_date.year, ref_date.month, 1)\n return type(ref_date)(year, month, 1)", "def tomorrow(self):\n days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if self.is_leap_year():\n days_in_month[2] = 29\n self.day += 1\n if (days_in_month[self.month] == 31 and self.day > 31 and self.month != 12) or (days_in_month[self.month] == 30 and self.day > 30) or (self.is_leap_year() and self.day > 29 and self.month == 2) or (not self.is_leap_year() and self.day > 28 and self.month == 2):\n self.day = 1\n self.month += 1\n if self.month == 12 and self.day > 31:\n self.day = 1\n self.month = 1\n self.year += 1", "def test_second_date_static_1(self):\n input_ = (datetime.date(1993, 1, 29), datetime.date(1993, 3, 1))\n expected = (datetime.date(1993, 1, 30), datetime.date(1993, 3, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def next_deadline():\n\n today = date.today()\n\n days_since_starting_sunday = (today - date(2016, 9, 4)).days\n\n if days_since_starting_sunday % 14 < 7:\n return next_sunday(next_sunday(today))\n else:\n return next_sunday(today)", "def get_first_date(in_month=1):\n\n from_date = (today-relativedelta(months=in_month)).replace(day=1)\n \n return from_date", "def get_month_start(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(day=1)", "def week_of_month(dt):\n\n first_day = dt.replace(day=1)\n\n dom = dt.day\n adjusted_dom = dom + first_day.weekday()\n\n return int(ceil(adjusted_dom/7.0))", "def increment_day(date):\n year, month, day = (date.year, date.month, date.day)\n try:\n day += 1\n return datetime.date(year, month, day)\n except ValueError:\n try:\n month += 1\n day = 1\n return datetime.date(year, month, day)\n except ValueError:\n try:\n year += 1\n month = 1\n day = 1\n return datetime.date(year, month, day)\n except ValueError:\n raise", "def date_to_midnight(date):\n return timezone.make_aware(timezone.datetime(date.year, date.month, date.day, 0, 0))", "def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))", "def test_second_date_static_2(self):\n input_ = (datetime.date(1993, 1, 30), datetime.date(1993, 3, 1))\n expected = (datetime.date(1993, 1, 31), datetime.date(1993, 3, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def 
make_tuesday(date):\n offset = (date.weekday() - 1) % 7\n tuesday = date - datetime.timedelta(days=offset)\n # Ensure that the database has this date\n with get_dbconn(\"postgis\") as conn:\n cursor = conn.cursor()\n cursor.execute(\"SELECT max(valid) from usdm\")\n maxdate = cursor.fetchone()[0]\n if maxdate is not None:\n tuesday = min([tuesday, maxdate])\n return tuesday", "def get_next_byday(self, daystring, startdate, fmt=None):\n\n # decimal number day of the week we're starting from. %w formats using Sunday as day 0.\n dow_start = int(datetime.datetime.strftime(startdate, '%w'))\n\n\n # decimal number day of week we're trying to get.\n dow_target = self.weekdays.index(daystring)\n\n # len - ((start + (len - target)) % len)\n days_ahead = 7 - ((dow_start + (7 - dow_target)) % 7)\n res = startdate + datetime.timedelta(days=days_ahead)\n return res", "def get_week_end(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof + TimeDelta(days=6 - (asof.isoweekday() - 1) % 7)", "def first_day_of_month(date):\n return date.replace(day=1)", "def first_day_of_month(date):\n return date.replace(day=1)", "def get_iex_next_day_ex_date(start=None, **kwargs):\n return NextDay(start=start, **kwargs).fetch()", "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "def start_month(d):\n return date(d.year, d.month, 1)", "def day_of_month():\n return datetime.date.today().day", "def test_second_date_static_3(self):\n input_ = (datetime.date(1993, 1, 31), datetime.date(1993, 3, 1))\n expected = (datetime.date(1993, 2, 1), datetime.date(1993, 3, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def next_week(today: Optional[datetime] = None, tz: Any = None) -> Tuple[datetime, datetime]:\n if today is None:\n today = datetime.utcnow()\n begin = today + timedelta(days=7 - today.weekday())\n begin = datetime(year=begin.year, month=begin.month, day=begin.day)\n return localize_time_range(begin, begin + timedelta(days=7), tz)", "def evening_twilight_12(self, date=None):\n self.site.horizon = self.horizon12\n self._set_site_date(date)\n r_date = self.site.next_setting(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date" ]
[ "0.80878097", "0.69910073", "0.67567307", "0.67550325", "0.6671787", "0.66657865", "0.65446717", "0.65446717", "0.65441513", "0.6513178", "0.64433354", "0.6440269", "0.6345591", "0.63378316", "0.62782484", "0.6256524", "0.62425745", "0.6242084", "0.61851376", "0.61851376", "0.6117924", "0.61053866", "0.60922694", "0.609124", "0.6040508", "0.6029982", "0.6029481", "0.5989535", "0.59858453", "0.59600985", "0.59367764", "0.59294", "0.59270084", "0.5920164", "0.59181535", "0.5916475", "0.5895303", "0.5880744", "0.5864605", "0.5857778", "0.5851439", "0.5851181", "0.5851181", "0.5845572", "0.584387", "0.580872", "0.5772788", "0.5772491", "0.576306", "0.57290417", "0.57250684", "0.57102233", "0.5685783", "0.5664757", "0.5655428", "0.5654998", "0.5635068", "0.5629066", "0.5623986", "0.5610294", "0.5587837", "0.5583916", "0.558272", "0.5574173", "0.55624706", "0.5558797", "0.5553216", "0.5531742", "0.5513559", "0.55021644", "0.5495883", "0.5490495", "0.54900986", "0.54855245", "0.5482843", "0.5480315", "0.5467731", "0.5467158", "0.5466066", "0.54584324", "0.5453332", "0.5453227", "0.54337245", "0.54242295", "0.5415072", "0.5408129", "0.540246", "0.5401436", "0.5395327", "0.5386224", "0.53816307", "0.5362979", "0.5362979", "0.5361619", "0.5357877", "0.53559417", "0.5351582", "0.53486204", "0.53388894", "0.5312436" ]
0.8260075
0
Helper function adding some known todo list items for the test user
def _add_todo_items(self):
    todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)
    todo_list.save()
    items = [
        'feed the cats',
        'drive to work',
        'read a book',
        'eat some food',
    ]
    todo_items = []
    for item in items:
        new_item = ToDoItem(
            title=item,
            to_do_list=todo_list,
            priority=1
        )
        new_item.save()
        todo_items.append(new_item)
    return todo_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_can_add_todo_list():\n scheduler = Scheduler()\n new_id = uuid.uuid4()\n\n scheduler.add_todo_list(new_id, \"my todo list\")\n\n Is(scheduler.get_amount_of_todo_lists()).not_none.integer.has_same_truth_of(1)", "def test_given_a_user_when_I_add_a_todo_Then_I_can_access_it_from_user_todo_collection(self):\n from .models import Tag\n from .models import TodoUser\n from .models import TodoItem\n\n user = TodoUser(\n email=u'king.arthur@example.com',\n first_name=u'Arthur',\n last_name=u'Pendragon',\n )\n self.session.add(user)\n\n tags = [u'quest', u'ni', u'knight']\n\n todo = TodoItem(user.email,\n u'Find a shrubbery', \n [u'quest', u'ni', u'knight'] \n ) \n self.session.add(todo)\n \n user_todo = user.todo_list.one()\n self.assertTrue(todo is user_todo)", "def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return", "def add_items(todofile, items):\n if(items is not None and len(items) > 0):\n for item in items:\n todofile.write_todo(parse_item(item))", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. 
Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"", "def add_items(list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def todo_added(name, description):", "def test_list_user(self):\n pass", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def test_list_notes(self):\n pass", "def list_2_for_tests(db_setup, user_for_test):\n heading = \"test_2_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)", "def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def test_creating_todo(todoApp, input):\n # Create new todo\n new_todo_input = todoApp.find_new_todo_input()\n print new_todo_input\n new_todo_input.send_keys(input, Keys.ENTER)\n\n # ASSERTION\n # Check whether the new todo exist in the todo list or not.\n todo = todoApp.find_todo(input)\n \n # Check the new todo status, it should active.\n assert todoApp.is_active_todo(todo)\n \n # Check the active todo count\n assert todoApp.count_active_todos() == '1 
item left'", "def test_todo(self):\n self.assertEqual(self.my_todo.state, \"T\")\n self.assertEqual(self.my_todo.due_date, date_today)\n self.assertEqual(self.my_todo.text, \"Call Mom\")\n self.assertEqual(str(self.my_todo), \"Call Mom\")", "def test_adding_many_todos(self):\n event = Event.objects.filter(slug__endswith=\"-upcoming\") \\\n .order_by(\"-pk\")[0]\n event.end = event.start + datetime.timedelta(days=2)\n event.save()\n\n # check if the event has 0 todos\n assert event.todoitem_set.all().count() == 0\n\n # add standard todos\n ident = event.get_ident()\n url, form = self._get_initial_form('todos_add', ident)\n\n # fix: turn Nones into empty strings\n for key, value in form.items():\n if value is None:\n form[key] = ''\n\n rv = self.client.post(reverse('todos_add', args=[ident]), form)\n\n # let's check if the form passes\n assert rv.status_code == 302\n\n # finally let's check there are some new todos\n assert event.todoitem_set.all().count() == 9", "def create_dummy_content(user_id):\n task = TodoItem(\n user=user_id,\n task=u'Find a shrubbery',\n tags=[u'quest', u'ni', u'knight'],\n due_date=datetime.utcnow() + timedelta(days=60),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Search for the holy grail',\n tags=[u'quest'],\n due_date=datetime.utcnow() - timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Recruit Knights of the Round Table',\n tags=[u'quest', u'knight', u'discuss'],\n due_date=datetime.utcnow() + timedelta(minutes=45),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Build a Trojan Rabbit',\n tags=[u'quest', u'rabbit'],\n due_date=datetime.utcnow() + timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Talk to Tim the Enchanter',\n tags=[u'quest', u'discuss'],\n due_date=datetime.utcnow() + timedelta(days=90),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Defeat the Rabbit of Caerbannog',\n tags=[u'quest', u'rabbit'],\n due_date=None,\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Cross the Bridge of Death',\n tags=[u'quest'],\n due_date=None,\n )\n DBSession.add(task)", "def do_item_add(self, arg):\n try:\n add_item = arg[\"<item_name>\"]\n add_item_str = \" \".join(add_item)\n app.ToDoApp.to_add_item(add_item_str, add_item = True)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def list_1_for_tests(db_setup, user_for_test):\n heading = \"test_1_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def test_post_foods_list(self):\n pass", "def post(self, dnzo_user):\n from tasks_data.task_lists import add_task_list, get_task_list\n \n task_list_name = self.request.get('task_list_name', None)\n if not task_list_name:\n self.bad_request(\"Must provide task_list_name to create a new list\")\n return\n \n new_list = add_task_list(dnzo_user, 
task_list_name)\n if not new_list:\n self.bad_request(\"Could not add the new task list!\")\n return\n \n self.json_response(task_list=new_list.to_dict())", "def add_item(self):\n\n self.todo_scroll_cell.add_item(f'{self.new_todo_textbox.get()}')", "def test_add(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. Adding failed.\")", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def add_shoppinglist(self, user_id, name):\n new_shoppinglist = ShoppingList(name)\n new_shoppinglist_details = new_shoppinglist.get_details()\n user = self.get_single_user(user_id)\n new_shoppinglist_details['id'] = len(user['shopping_lists']) + 1\n for item in user['shopping_lists']:\n if item['name'].lower() == name.lower():\n return \"Shopping list \" + str(name) + \" exits. Try editing it\"\n if new_shoppinglist_details['id'] == item['id']:\n new_shoppinglist_details['id'] = (\n new_shoppinglist_details['id'] + 1\n )\n user['shopping_lists'].append(new_shoppinglist_details)\n return \"Shopping list \" + str(name) + \" Created\"", "def test_add(self):\n # add a todo\n self.add(title=\"Sample task todo\", description=\"for sample\", state=\"todo\")\n task = Task.query.filter_by(title='Sample task todo').first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'todo')\n\n # add a doing\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title=\"Sample task doing\").first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'doing')\n\n # add a done\n self.add(title=\"Sample task done\", description=\"for sample\", state=\"done\")\n task = Task.query.filter_by(title='Sample task done').first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'done')", "def interactive(todofile):\n tmpfile = tempfile.NamedTemporaryFile(suffix='.txt', prefix='todo-',\n delete=False)\n print >> tmpfile\n print >> tmpfile , '# Todo items should be formed as <date> -- <todo>'\n print >> tmpfile , '# The date field is optional.'\n print >> tmpfile , '# Lines starting with # are ignored.'\n tmpfile.close()\n subprocess.call(['sensible-editor', tmpfile.name])\n with open(tmpfile.name) as writtenfile:\n add_items(todofile, writtenfile.readlines())\n os.remove(tmpfile.name)", "def test_app_can_add_list(self):\n add_list=self.client.post('/addshoppinglists/?user='+self.user['user'], \n data=self.shopllist, \n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(add_list.status_code,200)", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')", "def test_add_followers(self):\n 
pass", "def test_wantlist(self):\n # Fetch the user/wantlist from the filesystem\n u = self.d.user('example')\n self.assertEqual(len(u.wantlist), 3)\n\n # Stub out expected responses\n self.m._fetcher.fetcher.responses = {\n '/users/example/wants/5': (b'{\"id\": 5}', 201),\n '/users/example/wants/1': (b'', 204),\n }\n\n # Now bind the user to the memory client\n u.client = self.m\n\n u.wantlist.add(5)\n method, url, data, headers = self.m._fetcher.last_request\n self.assertEqual(method, 'PUT')\n self.assertEqual(url, '/users/example/wants/5')\n\n u.wantlist.remove(1)\n method, url, data, headers = self.m._fetcher.last_request\n self.assertEqual(method, 'DELETE')\n self.assertEqual(url, '/users/example/wants/1')", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def add_list(\n cls,\n name,\n user_id\n ):\n lists_collection = taskify['lists']\n new_list = {\n \"name\": name,\n \"user_id\": ObjectId(user_id)\n }\n list_id = lists_collection.insert_one(new_list).inserted_id\n return new_list, list_id", "def test_users_listed(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n # Assertions\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def add_list(request) -> HttpResponse:\n\n # Only staffers can add lists, regardless of TODO_STAFF_USER setting.\n if not request.user.is_staff:\n raise PermissionDenied\n\n if request.POST:\n form = AddTaskListForm(request.user, request.POST)\n if form.is_valid():\n try:\n newlist = form.save(commit=False)\n newlist.slug = slugify(newlist.name, allow_unicode=True)\n newlist.save()\n messages.success(request, \"A new list has been added.\")\n return redirect(\"todo:lists\")\n\n except IntegrityError:\n messages.warning(\n request,\n \"There was a problem saving the new list. 
\"\n \"Most likely a list with the same name in the same group already exists.\",\n )\n else:\n if request.user.groups.all().count() == 1:\n # FIXME: Assuming first of user's groups here; better to prompt for group\n form = AddTaskListForm(request.user, initial={\"group\": request.user.groups.all()[0]})\n else:\n form = AddTaskListForm(request.user)\n\n context = {\"form\": form}\n\n return render(request, \"todo/add_list.html\", context)", "def create_item(self, user: User, **kwargs) -> None:", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def todo_list_view(request):\n\n context = {}\n queryset = Todo.objects.filter(user=request.user)\n context['lists'] = queryset\n return render(request,'todos/index.html', context)", "def get_own_todos(current_user: models.User = Depends(get_current_user),\n \tdb: Session = Depends(get_db)):\n todos = blogcrud.get_user_todos(db, current_user.id)\n return todos", "def add_shoppingitems(self, user_id, shoppinglist_id, name, quantity):\n new_shoppingitem = ShoppingItem(name, quantity)\n new_shoppingitem_details = new_shoppingitem.get_details()\n user = self.get_single_user(user_id)\n for shopinglist in user['shopping_lists']:\n if shopinglist['id'] == int(shoppinglist_id):\n curr_shopinglist = shopinglist\n new_shoppingitem_details['id'] = (\n len(curr_shopinglist['items']) + 1)\n for item in curr_shopinglist['items']:\n if item['name'].lower() == name.lower():\n return \"Item \" + str(name) + \" exits. Try editing it\"\n if new_shoppingitem_details['id'] == item['id']:\n new_shoppingitem_details['id'] = (\n new_shoppingitem_details['id'] + 1\n )\n curr_shopinglist['items'].append(new_shoppingitem_details)\n return str(name) + \" has been added\"", "def add_item(self):\n item = models.Item(item_name=self.test_item,\n list_id=1,\n description=self.test_item_desc)\n item.add()", "def add_item(self, list_name: str, item_name: str, item_description: str) -> None:\n todo_list = self.get_list(list_name)\n todo_list.add_item(Item(item_name, item_description))", "def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. 
and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = 'random@mail.com'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)", "def test_new_entry_adds_to_list(testapp, set_auth_credentials, login_testcase):\n response = testapp.get(\"/journal/new-entry\")\n csrf_token = response.html.find(\"input\", {\"name\": \"csrf_token\"})\n csrf_token = csrf_token.attrs['value']\n\n post_params = {\n 'title': 'Learning Journal Title',\n 'body': 'So many things learned today.',\n 'csrf_token': csrf_token\n }\n\n response = testapp.post('/journal/new-entry', post_params, status=302)\n full_response = response.follow()\n new_title = full_response.html.find(id='journal-entry').a.text\n assert new_title == post_params['title']", "def set_items(self) -> None:\n if not self.__repeat:\n self._username: str = input(\"Introduzca el nombre del usuario: \")\n self._ordered_items = self.get_rights()[0]\n if not self._ordered_items:\n if self.__option == 1:\n self.print_error(\"El usuario ya tiene todos los permisos.\")\n return\n self.print_error(\"El usuario no tiene ningún permiso.\")\n return", "def addItems(*args):", "def add_todo(taskname, deadline, priority, reminder, deleteflag):\n autodel()\n task = {\n 'name': taskname,\n 'deadline': str(deadline),\n 'priority': priority,\n 'reminder': reminder,\n 'no_del': deleteflag\n }\n\n if not exists(task['name']):\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(task) + '\\n'\n todo.write(jdump)\n return 0\n except json.decoder.JSONDecodeError:\n return 1", "def test_list(self):\n pass", "def test_list(self):\n pass", "def add_list(action, user):\n \n userprofile = user.get_profile()\n \n board = userprofile.get_board(action['boardId'])\n \n # Create the list\n l = List()\n l.title = action['what']['title']\n l.color = action['what']['color']\n l.creator = user\n l.save()\n \n # Add the list to the user's lists\n \n board.lists.append(l.id)\n userprofile.save()\n \n return l;", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "async def todo(self, ctx):\n\n cursor = await db.execute(\"Select Thing from Todo where MemberID = ?\", (ctx.author.id,))\n result = await cursor.fetchall()\n\n if not result:\n return await send_embed(ctx, \"You do not have anything on your todo list.\", negative=True)\n\n result = [i[0] for i in result]\n\n embeds = []\n description = []\n\n for index, string in enumerate(result, start=1):\n\n description.append(f\"{index}. 
{string}\")\n\n if index % 10 == 0 or index == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n description=\"\\n\".join(description)\n )\n embed.set_author(name=str(ctx.author), icon_url=str(ctx.author.avatar_url))\n embeds.append(embed)\n description = []\n\n await self.bot.paginate(ctx, embeds)", "def _backfill_todo_items_for_previous_day(self):\n previous_day_date = self.day.date - timedelta(days=1)\n day, created = Day.get_or_create(date=previous_day_date)\n\n todo_list = ToDoList(day=day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'cut the grass',\n 'water the plants',\n 'take out the trash',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items", "def test_add_with_new_item(self):\n settings.TEST_SETTING_LIST = []\n wrapper = SettingListWrapper('TEST_SETTING_LIST', 'test setting list')\n wrapper.add('item1')\n\n self.assertEqual(settings.TEST_SETTING_LIST, ['item1'])\n self.assertEqual(wrapper.ref_counts.get('item1'), 1)", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def test_listWithDisabled(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n\n def cb(xxx_todo_changeme2):\n (interface, avatar, logout) = xxx_todo_changeme2\n avatar.disabled = 1\n output = self.userbase('list')\n self.assertEqual(output,\n ['alice@localhost', 'bob@localhost [DISABLED]'])\n\n return self._login('bob@localhost', SECRET).addCallback(cb)", "def test_add_item_adds_multiple_entries():\n sc.menu = sc.default_menu\n sc.current.add_item('Coffee', 2)\n sc.current.add_item('Coffee', 1)\n sc.current.add_item('Tea', 1)\n assert sc.current.receipt == {'subtotal': 6.36, 'Coffee': 3, 'Tea': 1}", "def test_list(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n output = self.userbase('list')\n self.assertEqual(output, ['alice@localhost', 'bob@localhost'])", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n #assert are django checks on http request is 200\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def setup_lists(self):\n pass", "def test_edit(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n main.List.edit(r, \"ToDo\", 1, \"Buy bananas, not apples\", 2, \"20.05.2015\")\n task = main.List.pull_from_redis(r, \"ToDo\", False)\n if task and task is not None:\n check = 
task[\"1\"][\"description\"] == \"Buy bananas, not apples\"\n self.assertTrue(check, \"Editing failed.\")", "def create_list(self, name) -> TodoList:\n t = TodoList(name, [])\n if name in self.lists:\n raise HTTPException(409, f\"TODO list with name {name} already exists\")\n self.lists[self.__to_key(name)] = t\n return t", "def add(request):\n\n context = {}\n forms = TodoForm()\n if request.method == \"POST\":\n forms = TodoForm(request.POST or None)\n if forms.is_valid():\n # If form is valid then create the todo\n forms = forms.save()\n # Update the user of new created user to be the logged in user\n forms.user = request.user\n # Save updated form\n forms.save()\n # Redirect to the home page\n return redirect(\"index\")\n \n context['form'] = forms\n return render(request, \"todos/add.html\", context)", "def create_toptenlist(self, user_ref, index):\n self.client.force_authenticate(user=getattr(self, user_ref))\n response = self.client.post(create_list_url, toptenlist_data_1, format='json')\n toptenlist_id = json.loads(response.content)['id']\n\n toptenlist_ref = 'toptenlist_' + str(index) # refer to toptenlist by self.toptenlist_1 etc\n\n # this allows us to reference the originial toptenlist from self\n # self.toptenlist_1 etc\n # this is not safe for properties like name, but is safe for getting toptenlist and toptenitem id because these do not change\n setattr(self, toptenlist_ref, TopTenList.objects.get(pk=toptenlist_id))\n\n # the request should succeed\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.client.logout()", "async def test_todo_feed_response_is_ordered_correctly(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res = await authorized_client.get(app.url_path_for(\"feed:get-todo-feed-for-user\"))\n assert res.status_code == status.HTTP_200_OK\n todo_feed = res.json()\n # the first 13 should be updated and the rest should not be updated\n for feed_item in todo_feed[:13]:\n assert feed_item[\"event_type\"] == \"is_update\"\n for feed_item in todo_feed[13:]:\n assert feed_item[\"event_type\"] == \"is_create\"", "def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n if state == 0:\n if line == '__IN_PROGRESS__':\n state = 1\n elif len(line) > 1:\n todo.append(line)\n elif state == 1:\n if line == '__DONE__':\n state = 2\n elif len(line) > 1:\n in_progress.append(line)\n elif state == 2:\n if len(line) > 1:\n done.append(line)\n line = todo_fp.readline()\n todo_fp.close()\n self.todo_scroll_cell.add_item_list(todo)\n self.in_progress_scroll_cell.add_item_list(in_progress)\n self.done_scroll_cell.add_item_list(done)", "def add_to_list(my_list):\n\n list_item = raw_input(\"What would you like to add to the list? 
\")\n my_list.append(list_item)", "def main():\n url = 'https://jsonplaceholder.typicode.com'\n user = '{}/users/'.format(url)\n todos = '{}/todos/'.format(url)\n\n # GET info from URLs\n res = requests.get(user)\n info = res.json()\n tasks = requests.get(todos)\n todo = tasks.json()\n file_name = 'todo_all_employess.json'\n\n user_id = {}\n user_name = {}\n\n for user in info:\n id = user.get('id')\n user_id[id] = []\n user_name[id] = user.get('username')\n\n for task in todo:\n task_id = {}\n id = task.get('userId')\n task_id = {'username': user_name.get(id), 'task': task.get('title'),\n 'completed': task.get('completed')}\n user_id.get(id).append(task_id)\n\n with open(file_name, 'w') as filename:\n json.dump(user_id, filename)", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def test_can_list(self):\n post_req = self.post_json(\n 'users',\n {\n \"data\": {\n \"type\": \"user\",\n \"attributes\": {\n \"uid\": \"90792532401de273\",\n \"social\": \"PURPLE\",\n \"name\": \"Екатерина Трошина\"\n },\n }\n }\n )\n self.assertEqual(post_req.status_code, 201)\n\n get_req = self.get('users')\n self.assertEqual(get_req.status_code, 200)\n\n users_list = get_req.json['data']\n self.assertEqual(len(users_list), 1)\n\n post_req = self.post_json(\n 'users',\n {\n \"data\": {\n \"type\": \"user\",\n \"attributes\": {\n \"uid\": \"5868ca829b560a1d\",\n \"social\": \"BLUE\",\n \"name\": \"Инна Авдюшина\"\n },\n }\n }\n )\n self.assertEqual(post_req.status_code, 201)\n\n get_req = self.get('users')\n self.assertEqual(get_req.status_code, 200)\n\n users_list = get_req.json['data']\n self.assertEqual(len(users_list), 2)", "def setup_public_reusable_item_1(self):\n\n # ensure reusable item is public\n reusableitem = ReusableItem.objects.get(pk=self.reusableitem_1.id)\n reusableitem.is_public = True\n reusableitem.save()\n\n # add a reference to this reusable item by user 2\n create_toptenlist(self, 'user_2', 2) # create a list for user 2\n reference_reusable_item(self, 'user_2', self.reusableitem_1.id, 'toptenlist_2', 0)\n\n return reusableitem", "def test_2_addautor(self):\n for nome, email, grupo in ((\"Autor 1\", \"email@email.com\", \"grupo 1\"),\n (\"Autor 2\", \"email@email.com\", \"\")):\n self.app.addAutor(nome=nome,\n email=email,\n grupo=grupo)", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def add_element_function(listt):\n\n\tservice_name = str(input(\"Enter web site/app name: \"))\n\tuser_name = str(input(\"Enter user name: \"))\n\tuser_password = str(input(\"Enter user password: \"))\n\n\tnew_item = {\n\t\t\"service\": service_name, \n\t\t\"user\": user_name,\n\t\t\"password\": user_password\n\t\t}\n\n\tlistt.append(new_item)\n\tprint(\"Status: Element added\")\n\tprint(f\"Current elements: {len(listt)}\")\n\tprint()\n\treturn listt", "def get_todos(self):\n if self.is_new:\n # if its a new project then create the todo items from the yml\n # templates\n return self.get_yml_todos()\n else:\n # check for existing todos\n return self.get_db_todos()", "def todos_add(request, event_ident):\n try:\n event = Event.get_by_ident(event_ident)\n except Event.DoesNotExist:\n raise Http404('Event matching query does not exist.')\n\n dt = 
datetime.datetime\n timedelta = datetime.timedelta\n\n initial = []\n base = dt.now()\n if event.start and event.end:\n extra = 9\n else:\n extra = 10\n initial = [\n {\n 'title': 'Set date with host',\n 'due': dt.now() + timedelta(days=30),\n 'event': event,\n },\n ]\n\n TodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm,\n extra=extra)\n\n formset = TodoFormSet(queryset=TodoItem.objects.none(), initial=initial + [\n {\n 'title': 'Set up a workshop website',\n 'due': base + timedelta(days=7),\n 'event': event,\n },\n {\n 'title': 'Find instructor #1',\n 'due': base + timedelta(days=14),\n 'event': event,\n },\n {\n 'title': 'Find instructor #2',\n 'due': base + timedelta(days=14),\n 'event': event,\n },\n {\n 'title': 'Follow up that instructors have booked travel',\n 'due': base + timedelta(days=21),\n 'event': event,\n },\n {\n 'title': 'Set up pre-workshop survey',\n 'due': event.start - timedelta(days=7) if event.start else '',\n 'event': event,\n },\n {\n 'title': 'Make sure instructors are set with materials',\n 'due': event.start - timedelta(days=1) if event.start else '',\n 'event': event,\n },\n {\n 'title': 'Submit invoice',\n 'due': event.end + timedelta(days=2) if event.end else '',\n 'event': event,\n },\n {\n 'title': 'Make sure instructors are reimbursed',\n 'due': event.end + timedelta(days=7) if event.end else '',\n 'event': event,\n },\n {\n 'title': 'Get attendee list',\n 'due': event.end + timedelta(days=7) if event.end else '',\n 'event': event,\n },\n ])\n\n if request.method == 'POST':\n formset = TodoFormSet(request.POST)\n if formset.is_valid():\n formset.save()\n messages.success(request, 'Successfully added a bunch of TODOs.',\n extra_tags='todos')\n return redirect(reverse(event_details, args=(event.get_ident(), )))\n else:\n messages.error(request, 'Fix errors below.')\n\n context = {\n 'title': 'Add standard TODOs to the event',\n 'formset': formset,\n 'helper': bootstrap_helper_inline_formsets,\n 'event': event,\n }\n return render(request, 'workshops/todos_add.html', context)", "def setUp(self):\n self.new_inv_item = ['1', 'Knife Set', 10, 'n', 'n']\n self.new_furn_item = ['2', 'Couch', 25, 'y', 'Cloth', 'L']\n self.new_elec_item = ['3', 'Dryer', 100, 'n', 'y', 'Samsung', 12]", "def test_add_with_existing_item(self):\n settings.TEST_SETTING_LIST = ['item1']\n wrapper = SettingListWrapper('TEST_SETTING_LIST', 'test setting list')\n wrapper.add('item1')\n\n self.assertEqual(settings.TEST_SETTING_LIST, ['item1'])\n self.assertEqual(wrapper.ref_counts.get('item1'), 2)", "def get_todo_list():\n\n # assume that a \"h264\" encoded file is complete\n return models.LibraryItem.objects.filter(h264=False)", "def todo():\n print(\"OK\")", "def addItem(list,item):\n print \"I added this item: \", item\n list.append(item)", "def _add_list(api, item):\n print(' Adding %s' % item['name'])\n try:\n api.add_rule_lists(item)\n except AssertionError:\n # Raise this to facilitate unit testing\n raise\n except Exception as e: # pylint: disable=broad-except\n print(' Failed: %s' % e)", "def test_create_new_shopping_list_correct_user(create_user, create_shopping_list): # noqa\n shopping_list = create_shopping_list\n owner = create_user\n assert shopping_list.owner == owner", "def test_add_user(self):\n pass", "def test_users_moira_list(logged_in_apiclient, mock_moira_client):\n client, user = logged_in_apiclient\n user.is_staff = True\n user.save()\n client.force_login(user)\n list_names = [\"test_moira_list01\", \"test_moira_list02\"]\n 
mock_moira_client.return_value.user_list_membership.return_value = [\n {\"listName\": list_name} for list_name in list_names\n ]\n\n username_or_email = [\n user.username,\n user.email,\n UserFactory(email=\"user-name.1@mit.edu\").email,\n ]\n\n for arg in username_or_email:\n url = reverse(\"member-lists\", kwargs={\"username_or_email\": arg})\n expected = {\"user_lists\": list_names}\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK\n assert expected == response.data", "def view_list():\n # an HTML representation of the user shopping list\n printed_list = user[\"name\"]\n printed_list += \"<form>\"\n printed_list += '<br>'\n printed_list += 'New Item:<br>'\n printed_list += '<input type=\"text\" name=\"newitem\">'\n printed_list += '<br>'\n printed_list += '<input type=\"submit\" value=\"Submit\">'\n printed_list += \"</form>\"\n printed_list += list_to_html(user[\"shopping_list\"])\n\n return printed_list", "def add(request):\n\tif request.method == 'GET':\n\t\tID = request.GET.get('id',False)\n\t\tstatus = request.GET.get('status',False)\n\t\ttaskname = request.GET.get('taskname',False)\n\t\tdescription = request.GET.get('description','')\n\n\t\tprint(taskname)\n\t\terror = {}\n\t\tif not ID:\n\t\t\terror['error'] = \"id not given\"\n\t\telif not status:\n\t\t\terror['error'] = \"status not given\"\n\t\telif not taskname:\n\t\t\terror['error'] = \"taskname not given\"\n\t\telif not description:\n\t\t\terror['error'] = \"description not given\"\n\t\telse:\n\t\t\ttodo['task'].append({\"id\":ID,\"status\":status,\"taskname\":taskname,\"description\":description})\n\n\t\tif len(error) != 0:\n\t\t\tresponse = error\n\t\telse:\n\t\t\tresponse = todo['task'][-1]\n\n\treturn JsonResponse(response)", "def save_todo_file(self):\n\n if os.path.exists('TODO.txt'):\n os.remove('TODO.txt')\n todo_fp = open('TODO.txt', 'w')\n todo_items = self.todo_scroll_cell.get_item_list()\n in_progress_items = self.in_progress_scroll_cell.get_item_list()\n done_items = self.done_scroll_cell.get_item_list()\n for item in todo_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__IN_PROGRESS__' + '\\n')\n for item in in_progress_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__DONE__' + '\\n')\n for item in done_items:\n todo_fp.write(item + '\\n')\n todo_fp.close()\n self.master.show_message_popup('Saved', 'Your TODO list has been saved!')", "def test_add_item_using_post(self):\n pass" ]
[ "0.67881167", "0.6779041", "0.67479783", "0.6636232", "0.65715206", "0.6420959", "0.63876003", "0.63576937", "0.6330586", "0.62917775", "0.62856203", "0.6273896", "0.62697417", "0.6185657", "0.6137805", "0.6080443", "0.60529596", "0.60524225", "0.601782", "0.6005775", "0.5996415", "0.59884447", "0.5958338", "0.59508663", "0.5936028", "0.592948", "0.5891282", "0.5853943", "0.58322465", "0.580869", "0.578421", "0.57815754", "0.57763785", "0.5766966", "0.5744906", "0.57298213", "0.57275397", "0.5725672", "0.5712867", "0.5711522", "0.57106", "0.5706564", "0.57043487", "0.5697918", "0.56958455", "0.56935245", "0.5685117", "0.5672004", "0.56418276", "0.562445", "0.5613604", "0.5604383", "0.55867076", "0.5578164", "0.55759907", "0.5574553", "0.5573428", "0.5573428", "0.5565127", "0.55610913", "0.55607575", "0.5559271", "0.5551607", "0.5533613", "0.5527684", "0.5511477", "0.5501883", "0.547604", "0.5470979", "0.5462091", "0.54595", "0.54587674", "0.5458328", "0.54550576", "0.5454915", "0.5453382", "0.54479116", "0.544778", "0.544778", "0.544778", "0.5444612", "0.5444518", "0.54435927", "0.5430399", "0.5429216", "0.54267776", "0.5402618", "0.53962725", "0.5394971", "0.5389226", "0.5373328", "0.5371654", "0.53707737", "0.5360734", "0.5354415", "0.5353259", "0.53527904", "0.53454983", "0.533835", "0.5337849" ]
0.74570924
0
Helper function adding some known todo list items for the test user for the previous day
def _backfill_todo_items_for_previous_day(self):
    previous_day_date = self.day.date - timedelta(days=1)
    day, created = Day.get_or_create(date=previous_day_date)

    todo_list = ToDoList(day=day, user=self.user.user.rolllistuser)
    todo_list.save()

    items = [
        'cut the grass',
        'water the plants',
        'take out the trash',
    ]
    todo_items = []
    for item in items:
        new_item = ToDoItem(
            title=item,
            to_do_list=todo_list,
            priority=1
        )
        new_item.save()
        todo_items.append(new_item)
    return todo_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_todo_items(self):\n\n todo_list = ToDoList(day=self.day, user=self.user.user.rolllistuser)\n todo_list.save()\n\n items = [\n 'feed the cats',\n 'drive to work',\n 'read a book',\n 'eat some food',\n ]\n todo_items = []\n for item in items:\n new_item = ToDoItem(\n title=item,\n to_do_list=todo_list,\n priority=1\n )\n new_item.save()\n todo_items.append(new_item)\n return todo_items", "def test_can_add_todo_list():\n scheduler = Scheduler()\n new_id = uuid.uuid4()\n\n scheduler.add_todo_list(new_id, \"my todo list\")\n\n Is(scheduler.get_amount_of_todo_lists()).not_none.integer.has_same_truth_of(1)", "def todo_added(name, description):", "def add_item(todo_list):\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return", "async def test_todo_feed_response_is_ordered_correctly(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res = await authorized_client.get(app.url_path_for(\"feed:get-todo-feed-for-user\"))\n assert res.status_code == status.HTTP_200_OK\n todo_feed = res.json()\n # the first 13 should be updated and the rest should not be updated\n for feed_item in todo_feed[:13]:\n assert feed_item[\"event_type\"] == \"is_update\"\n for feed_item in todo_feed[13:]:\n assert feed_item[\"event_type\"] == \"is_create\"", "def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l", "def add_item(todo_list, todo_new_item):\n check = True\n try:\n todo_list.append(todo_new_item)\n except todo_list:\n print(\"Could not add new item to todo list\")\n check = False\n\n return check", "def write_todo(self, todo):\n if todo != None:\n print 'added \"%s\"' % todo.text\n self.new_items.append(todo)", "def add_item_to_list(self, todolist):\n\t\tnote = self.get_all_text_view_text(self.textview_add)\n\t\ttodolist.add_item(note)\n\t\tself.textview_add.get_buffer().set_text('')", "def add_list(user_id):\n\n list_title = request.form[\"list_title\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.filter_by(list_title=list_title).first()\n\n if to_do_list:\n flash(\"List name already exists. 
Please select a new name.\")\n return redirect(\"/dashboard\")\n\n new_list = ToDoList(list_title=list_title, user_id=user_id)\n \n db.session.add(new_list)\n db.session.commit()\n \n return redirect(\"/dashboard\")", "def add_items(todofile, items):\n if(items is not None and len(items) > 0):\n for item in items:\n todofile.write_todo(parse_item(item))", "def test_future_question_past_question(self):\n create_todo(todo_text=\"Future todo\", days=30)\n create_todo(todo_text=\"Past todo\", days=-30)\n response = self.client.get(reverse('todolist:index'))\n self.assertQuerysetEqual(\n response.context['todo_items_list'],\n ['<TodoItem: >']\n )", "def todo(self):\n # sort events with eventid using datetime string\n pass", "def do_todo_create(self, arg):\n try:\n my_list = arg[\"<list_name>\"]\n my_list_str = \" \".join(my_list) \n app.ToDoApp.to_create_todo(my_list_str)\n \n except ValueError as e:\n cprint(e, 'red')", "def test_adding_many_todos(self):\n event = Event.objects.filter(slug__endswith=\"-upcoming\") \\\n .order_by(\"-pk\")[0]\n event.end = event.start + datetime.timedelta(days=2)\n event.save()\n\n # check if the event has 0 todos\n assert event.todoitem_set.all().count() == 0\n\n # add standard todos\n ident = event.get_ident()\n url, form = self._get_initial_form('todos_add', ident)\n\n # fix: turn Nones into empty strings\n for key, value in form.items():\n if value is None:\n form[key] = ''\n\n rv = self.client.post(reverse('todos_add', args=[ident]), form)\n\n # let's check if the form passes\n assert rv.status_code == 302\n\n # finally let's check there are some new todos\n assert event.todoitem_set.all().count() == 9", "def add_todo():\n task = flask.request.form[\"task\"]\n todos.append(ToDo(task))\n return \"success\"", "def list_2_for_tests(db_setup, user_for_test):\n heading = \"test_2_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def add_items(list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def test_given_a_user_when_I_add_a_todo_Then_I_can_access_it_from_user_todo_collection(self):\n from .models import Tag\n from .models import TodoUser\n from .models import TodoItem\n\n user = TodoUser(\n email=u'king.arthur@example.com',\n first_name=u'Arthur',\n last_name=u'Pendragon',\n )\n self.session.add(user)\n\n tags = [u'quest', u'ni', u'knight']\n\n todo = TodoItem(user.email,\n u'Find a shrubbery', \n [u'quest', u'ni', u'knight'] \n ) \n self.session.add(todo)\n \n user_todo = user.todo_list.one()\n self.assertTrue(todo is user_todo)", "def todos_add(request, event_ident):\n try:\n event = Event.get_by_ident(event_ident)\n except Event.DoesNotExist:\n raise 
Http404('Event matching query does not exist.')\n\n dt = datetime.datetime\n timedelta = datetime.timedelta\n\n initial = []\n base = dt.now()\n if event.start and event.end:\n extra = 9\n else:\n extra = 10\n initial = [\n {\n 'title': 'Set date with host',\n 'due': dt.now() + timedelta(days=30),\n 'event': event,\n },\n ]\n\n TodoFormSet = modelformset_factory(TodoItem, form=SimpleTodoForm,\n extra=extra)\n\n formset = TodoFormSet(queryset=TodoItem.objects.none(), initial=initial + [\n {\n 'title': 'Set up a workshop website',\n 'due': base + timedelta(days=7),\n 'event': event,\n },\n {\n 'title': 'Find instructor #1',\n 'due': base + timedelta(days=14),\n 'event': event,\n },\n {\n 'title': 'Find instructor #2',\n 'due': base + timedelta(days=14),\n 'event': event,\n },\n {\n 'title': 'Follow up that instructors have booked travel',\n 'due': base + timedelta(days=21),\n 'event': event,\n },\n {\n 'title': 'Set up pre-workshop survey',\n 'due': event.start - timedelta(days=7) if event.start else '',\n 'event': event,\n },\n {\n 'title': 'Make sure instructors are set with materials',\n 'due': event.start - timedelta(days=1) if event.start else '',\n 'event': event,\n },\n {\n 'title': 'Submit invoice',\n 'due': event.end + timedelta(days=2) if event.end else '',\n 'event': event,\n },\n {\n 'title': 'Make sure instructors are reimbursed',\n 'due': event.end + timedelta(days=7) if event.end else '',\n 'event': event,\n },\n {\n 'title': 'Get attendee list',\n 'due': event.end + timedelta(days=7) if event.end else '',\n 'event': event,\n },\n ])\n\n if request.method == 'POST':\n formset = TodoFormSet(request.POST)\n if formset.is_valid():\n formset.save()\n messages.success(request, 'Successfully added a bunch of TODOs.',\n extra_tags='todos')\n return redirect(reverse(event_details, args=(event.get_ident(), )))\n else:\n messages.error(request, 'Fix errors below.')\n\n context = {\n 'title': 'Add standard TODOs to the event',\n 'formset': formset,\n 'helper': bootstrap_helper_inline_formsets,\n 'event': event,\n }\n return render(request, 'workshops/todos_add.html', context)", "def create_dummy_content(user_id):\n task = TodoItem(\n user=user_id,\n task=u'Find a shrubbery',\n tags=[u'quest', u'ni', u'knight'],\n due_date=datetime.utcnow() + timedelta(days=60),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Search for the holy grail',\n tags=[u'quest'],\n due_date=datetime.utcnow() - timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Recruit Knights of the Round Table',\n tags=[u'quest', u'knight', u'discuss'],\n due_date=datetime.utcnow() + timedelta(minutes=45),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Build a Trojan Rabbit',\n tags=[u'quest', u'rabbit'],\n due_date=datetime.utcnow() + timedelta(days=1),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Talk to Tim the Enchanter',\n tags=[u'quest', u'discuss'],\n due_date=datetime.utcnow() + timedelta(days=90),\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Defeat the Rabbit of Caerbannog',\n tags=[u'quest', u'rabbit'],\n due_date=None,\n )\n DBSession.add(task)\n task = TodoItem(\n user=user_id,\n task=u'Cross the Bridge of Death',\n tags=[u'quest'],\n due_date=None,\n )\n DBSession.add(task)", "def list_items(todofile, opt, args):\n def filt(item):\n \"\"\"Filter function based on options.\"\"\"\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n 
((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result\n\n for item in filter(filt, todofile.fetch_items()):\n list_str = ['']\n if (item.done):\n list_str.append('X')\n elif (item.time is not None and item.time < datetime.datetime.now()):\n list_str.append('!')\n else:\n list_str.append('*')\n if(opt.list_id):\n list_str.append('{0:<3d}'.format(item.itemid))\n if(opt.list_date and item.time is not None):\n list_str.append(item.time.strftime('%c') + ' --')\n list_str.append(item.text)\n print ' '.join(list_str)", "def list_1_for_tests(db_setup, user_for_test):\n heading = \"test_1_heading\"\n display_order = 0\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO lists(user_id, heading, display_order, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (user_for_test[\"id\"], heading, display_order,)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(L) \n FROM( SELECT id, heading, display_order FROM lists WHERE heading = %s LIMIT 1)\n L\n \"\"\", (heading,)\n )\n\n test_list_json = db_setup.cur.fetchone()[0]\n return test_list_json", "def create_todo(todo_text, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return TodoItem.objects.create(todo_text=todo_text, pub_date=time)", "def test_creating_todo(todoApp, input):\n # Create new todo\n new_todo_input = todoApp.find_new_todo_input()\n print new_todo_input\n new_todo_input.send_keys(input, Keys.ENTER)\n\n # ASSERTION\n # Check whether the new todo exist in the todo list or not.\n todo = todoApp.find_todo(input)\n \n # Check the new todo status, it should active.\n assert todoApp.is_active_todo(todo)\n \n # Check the active todo count\n assert todoApp.count_active_todos() == '1 item left'", "def get_own_todos(current_user: models.User = Depends(get_current_user),\n \tdb: Session = Depends(get_db)):\n todos = blogcrud.get_user_todos(db, current_user.id)\n return todos", "def add_item(self, text):\n\t\tnew_todo = self.todolist.add(text)\n\t\tself.store.append((new_todo.id, text))", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "async def test_todo_feed_has_created_and_updated_items_for_modified_cleaning_jobs(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res_page_1 = await authorized_client.get(\n app.url_path_for(\"feed:get-todo-feed-for-user\"),\n params={\"page_chunk_size\": 30},\n )\n assert res_page_1.status_code == status.HTTP_200_OK\n ids_page_1 = [feed_item[\"id\"] for feed_item in res_page_1.json()]\n todo_feeds = [TodoFeedItem(**feed_item) for feed_item in res_page_1.json()]\n for todo_feed in todo_feeds:\n assert todo_feed.as_task is True\n\n new_starting_date = res_page_1.json()[-1][\"updated_at\"]\n\n res_page_2 = await authorized_client.get(\n app.url_path_for(\"feed:get-todo-feed-for-user\"),\n params={\"starting_date\": new_starting_date, \"page_chunk_size\": 33},\n )\n assert res_page_2.status_code == status.HTTP_200_OK\n ids_page_2 = [feed_item[\"id\"] for 
feed_item in res_page_2.json()]\n todo_feeds_2 = [TodoFeedItem(**feed_item) for feed_item in res_page_2.json()]\n for todo_feed in todo_feeds_2:\n assert todo_feed.as_task is True\n\n # should have duplicate IDs for 13 update events an `is_create` event and an `is_update` event\n id_counts = Counter(ids_page_1 + ids_page_2)\n assert len([id for id, cnt in id_counts.items() if cnt > 1]) == 13", "def test_calendar_query_todo_alarm(self):\n raise SkipTest(\"test unimplemented\")", "def get_todo_data():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n\n result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n\n use_value = values[len(values)-1][0]\n\n use_value_list = []\n counter = 0\n start_index = 0\n for c in use_value:\n counter += 1\n if c == \",\":\n check_val = use_value[start_index:counter - 1]\n # change slice value to adjust when repeat items are allowed:\n if check_val not in use_value_list[:8]:\n use_value_list.append(check_val)\n start_index = counter\n elif counter == len(use_value)-1:\n use_value_list.append(use_value[start_index:])\n\n # Makes oldest at top, delete for oldest at bottom:\n use_value_list.reverse()\n\n return use_value_list", "def test_list_notes(self):\n pass", "def add_shared_items(shared_list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(shared_list_id)\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{shared_list_id}\")", "def get_todos(self):\n if self.is_new:\n # if its a new project then create the todo items from the yml\n # templates\n return self.get_yml_todos()\n else:\n # check for existing todos\n return self.get_db_todos()", "def add_shoppinglist(self, user_id, name):\n new_shoppinglist = ShoppingList(name)\n new_shoppinglist_details = new_shoppinglist.get_details()\n user = self.get_single_user(user_id)\n new_shoppinglist_details['id'] = len(user['shopping_lists']) + 1\n for item in user['shopping_lists']:\n if item['name'].lower() == name.lower():\n return \"Shopping list \" + str(name) + \" exits. 
Try editing it\"\n if new_shoppinglist_details['id'] == item['id']:\n new_shoppinglist_details['id'] = (\n new_shoppinglist_details['id'] + 1\n )\n user['shopping_lists'].append(new_shoppinglist_details)\n return \"Shopping list \" + str(name) + \" Created\"", "def test_todo(self):\n self.assertEqual(self.my_todo.state, \"T\")\n self.assertEqual(self.my_todo.due_date, date_today)\n self.assertEqual(self.my_todo.text, \"Call Mom\")\n self.assertEqual(str(self.my_todo), \"Call Mom\")", "def test_booklist_ordered_by_due_date(self):\n # Change all book's status to loan('o')\n for book in BookInstance.objects.all():\n book.status = 'o'\n book.save()\n\n # Login into page\n login = self.client.login(\n username='testuser1',\n password='1X<ISRUkw+tuK')\n response = self.client.get(reverse('my-borrowed'))\n\n # Check that user is logged in\n self.assertEqual(str(response.context['user']), 'testuser1')\n self.assertEqual(response.status_code, 200)\n\n # Confirm that only 10 items are displayed per page\n self.assertEqual(len(response.context['bookinstancelist']), 10)\n\n last_date = 0\n for book in response.context['bookinstancelist']:\n if last_date == 0:\n last_date = book.due_back\n else:\n self.assertTrue(last_date <= book.due_back)\n last_date = book.due_back", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. 
and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = 'random@mail.com'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)", "def post(self, dnzo_user):\n from tasks_data.task_lists import add_task_list, get_task_list\n \n task_list_name = self.request.get('task_list_name', None)\n if not task_list_name:\n self.bad_request(\"Must provide task_list_name to create a new list\")\n return\n \n new_list = add_task_list(dnzo_user, task_list_name)\n if not new_list:\n self.bad_request(\"Could not add the new task list!\")\n return\n \n self.json_response(task_list=new_list.to_dict())", "def modifies_user_trips(trips):\n\n trips_list = []\n\n for trip in trips:\n if trip:\n start_date = trip.start_date.strftime(\"%a, %b %d, %Y\")\n end_date = trip.end_date.strftime(\"%a, %b %d, %Y\")\n trip_detail = (trip.trip_name, start_date, end_date, trip.notes, trip.trip_id)\n trips_list.append(trip_detail)\n else:\n return None\n\n return trips_list", "def task_created(user, friends, task_description):\n\n title = f\"{user.username}'s task\"\n body = f\"{task_description}\"\n\n for user in friends:\n if user.fir_push_notif_token is not None and user.task_notifs:\n notify_user(user, title, body)", "def to_do_3_for_tests(db_setup, list_2_for_tests):\n list_id = list_2_for_tests[\"id\"]\n title = \"toDoTest3Title\"\n desc = \"toDoTest3Desc\"\n\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO to_dos(list_id, title, description, due, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP,CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (list_id, title, desc)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(t) \n FROM( SELECT id, list_id, title, description, due FROM to_dos WHERE title = %s LIMIT 1)\n t\n \"\"\", (title,)\n )", "def test_new_entry_adds_to_list(testapp, set_auth_credentials, login_testcase):\n response = testapp.get(\"/journal/new-entry\")\n csrf_token = response.html.find(\"input\", {\"name\": \"csrf_token\"})\n csrf_token = csrf_token.attrs['value']\n\n post_params = {\n 'title': 'Learning Journal Title',\n 'body': 'So many things learned today.',\n 'csrf_token': csrf_token\n }\n\n response = testapp.post('/journal/new-entry', post_params, status=302)\n full_response = response.follow()\n new_title = full_response.html.find(id='journal-entry').a.text\n assert new_title == post_params['title']", "def to_do_2_for_tests(db_setup, list_1_for_tests):\n list_id = list_1_for_tests[\"id\"]\n title = \"toDoTest2Title\"\n desc = \"toDoTest2Desc\"\n\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO to_dos(list_id, title, description, due, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP,CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (list_id, title, desc)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(t) \n FROM( SELECT id, list_id, title, description, due FROM to_dos WHERE title = %s LIMIT 1)\n t\n \"\"\", (title,)\n )\n\n test_to_do_json = db_setup.cur.fetchone()[0]\n return test_to_do_json", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def add_item(self):\n\n self.todo_scroll_cell.add_item(f'{self.new_todo_textbox.get()}')", "def add_shoppingitems(self, user_id, shoppinglist_id, name, 
quantity):\n new_shoppingitem = ShoppingItem(name, quantity)\n new_shoppingitem_details = new_shoppingitem.get_details()\n user = self.get_single_user(user_id)\n for shopinglist in user['shopping_lists']:\n if shopinglist['id'] == int(shoppinglist_id):\n curr_shopinglist = shopinglist\n new_shoppingitem_details['id'] = (\n len(curr_shopinglist['items']) + 1)\n for item in curr_shopinglist['items']:\n if item['name'].lower() == name.lower():\n return \"Item \" + str(name) + \" exits. Try editing it\"\n if new_shoppingitem_details['id'] == item['id']:\n new_shoppingitem_details['id'] = (\n new_shoppingitem_details['id'] + 1\n )\n curr_shopinglist['items'].append(new_shoppingitem_details)\n return str(name) + \" has been added\"", "def autodel(): #i hate this code so much\n today, tasks = datetime.today(), []\n to_remove_indexes = []\n deleted_tasks = 0\n\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for i, task in enumerate(tasks):\n try:\n task = json.loads(task)\n except json.decoder.JSONDecodeError:\n return False, False\n if task['deadline'] == \"None\": #because i converted to string in adding\n continue\n dline = datetime.strptime(task['deadline'], \"%Y-%m-%d %H:%M:%S\")\n if dline < today and not task['no_del']:\n to_remove_indexes.append(i)\n deleted_tasks += 1\n\n for index in to_remove_indexes[::-1]:\n del tasks[index]\n \n with open(todofile, 'w') as todo:\n for task in tasks:\n todo.write(task)\n \n return deleted_tasks, True", "def add_todo(taskname, deadline, priority, reminder, deleteflag):\n autodel()\n task = {\n 'name': taskname,\n 'deadline': str(deadline),\n 'priority': priority,\n 'reminder': reminder,\n 'no_del': deleteflag\n }\n\n if not exists(task['name']):\n with open(todofile, 'a') as todo:\n try:\n jdump = json.dumps(task) + '\\n'\n todo.write(jdump)\n return 0\n except json.decoder.JSONDecodeError:\n return 1", "def test_add(self):\n # add a todo\n self.add(title=\"Sample task todo\", description=\"for sample\", state=\"todo\")\n task = Task.query.filter_by(title='Sample task todo').first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'todo')\n\n # add a doing\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title=\"Sample task doing\").first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'doing')\n\n # add a done\n self.add(title=\"Sample task done\", description=\"for sample\", state=\"done\")\n task = Task.query.filter_by(title='Sample task done').first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'done')", "def to_do_1_for_tests(db_setup, list_1_for_tests):\n list_id = list_1_for_tests[\"id\"]\n title = \"toDoTest1Title\"\n desc = \"toDoTest1Desc\"\n\n db_setup.cur.execute(\n \"\"\"\n INSERT INTO to_dos(list_id, title, description, due, created_at, updated_at) VALUES(%s, %s, %s, CURRENT_TIMESTAMP,CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) RETURNING *\n \"\"\", (list_id, title, desc)\n )\n db_setup.con.commit()\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(t) \n FROM( SELECT id, list_id, title, description, due FROM to_dos WHERE title = %s LIMIT 1)\n t\n \"\"\", (title,)\n )\n\n test_to_do_json = db_setup.cur.fetchone()[0]\n return test_to_do_json", "def push_and_reset(self):\n\n self.add_item()\n self.new_todo_textbox.clear()", "async def test_todo_feed_can_paginate_correctly(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n 
test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n res_page_1 = await authorized_client.get(app.url_path_for(\"feed:get-todo-feed-for-user\"))\n assert res_page_1.status_code == status.HTTP_200_OK\n todo_feed_page_1 = res_page_1.json()\n assert len(todo_feed_page_1) == 20\n ids_page_1 = set(feed_item[\"id\"] for feed_item in todo_feed_page_1)\n new_starting_date = todo_feed_page_1[-1][\"event_timestamp\"]\n\n res_page_2 = await authorized_client.get(\n app.url_path_for(\"feed:get-todo-feed-for-user\"),\n params={\"starting_date\": new_starting_date, \"page_chunk_size\": 20},\n )\n assert res_page_2.status_code == status.HTTP_200_OK\n todo_feed_page_2 = res_page_2.json()\n assert len(todo_feed_page_2) == 20\n ids_page_2 = set(feed_item[\"id\"] for feed_item in todo_feed_page_2)\n\n assert ids_page_1 != ids_page_2", "def test_add(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. Adding failed.\")", "def do_item_add(self, arg):\n try:\n add_item = arg[\"<item_name>\"]\n add_item_str = \" \".join(add_item)\n app.ToDoApp.to_add_item(add_item_str, add_item = True)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def test_list_playlist_ordering_created_on(self):\n user = factories.UserFactory()\n\n # In this org, the user is not a member but he has access to this playlist.\n org_1 = factories.OrganizationFactory()\n playlist_1 = factories.PlaylistFactory(\n lti_id=\"playlist#one\", organization=org_1, title=\"First playlist\"\n )\n factories.PlaylistAccessFactory(\n playlist=playlist_1, user=user, role=models.ADMINISTRATOR\n )\n # user has no access on this playlist\n factories.PlaylistFactory(\n lti_id=\"playlist#two\", organization=org_1, title=\"Second playlist\"\n )\n\n # In this org, the user is not a member but he has access to this playlist.\n org_2 = factories.OrganizationFactory()\n playlist_3 = factories.PlaylistFactory(\n lti_id=\"playlist#three\", organization=org_2, title=\"Third playlist\"\n )\n factories.PlaylistAccessFactory(\n playlist=playlist_3, user=user, role=models.ADMINISTRATOR\n )\n\n # Orphan playlist, not in an organization, the use has access to.\n playlist_4 = factories.PlaylistFactory(\n lti_id=\"playlist#four\", title=\"Fourth playlist\"\n )\n factories.PlaylistAccessFactory(\n playlist=playlist_4, user=user, role=models.ADMINISTRATOR\n )\n\n jwt_token = UserAccessTokenFactory(user=user)\n\n response = self.client.get(\n \"/api/playlists/?ordering=created_on\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json()[\"count\"], 3)\n self.assertEqual(\n response.json()[\"results\"],\n [\n {\n \"consumer_site\": {\n \"id\": str(playlist_1.consumer_site.id),\n \"domain\": playlist_1.consumer_site.domain,\n \"name\": playlist_1.consumer_site.name,\n },\n \"created_by\": None,\n \"created_on\": playlist_1.created_on.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n \"duplicated_from\": None,\n \"id\": str(playlist_1.id),\n \"is_portable_to_consumer_site\": False,\n \"is_portable_to_playlist\": True,\n \"is_public\": False,\n \"lti_id\": \"playlist#one\",\n \"organization\": {\n \"id\": str(org_1.id),\n \"name\": org_1.name,\n },\n \"portable_to\": [],\n \"retention_duration\": None,\n \"title\": \"First playlist\",\n \"users\": [str(user.id)],\n \"can_edit\": True,\n },\n {\n \"consumer_site\": {\n \"id\": str(playlist_3.consumer_site.id),\n 
\"domain\": playlist_3.consumer_site.domain,\n \"name\": playlist_3.consumer_site.name,\n },\n \"created_by\": None,\n \"created_on\": playlist_3.created_on.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n \"duplicated_from\": None,\n \"id\": str(playlist_3.id),\n \"is_portable_to_consumer_site\": False,\n \"is_portable_to_playlist\": True,\n \"is_public\": False,\n \"lti_id\": \"playlist#three\",\n \"organization\": {\n \"id\": str(org_2.id),\n \"name\": org_2.name,\n },\n \"portable_to\": [],\n \"retention_duration\": None,\n \"title\": \"Third playlist\",\n \"users\": [str(user.id)],\n \"can_edit\": True,\n },\n {\n \"consumer_site\": {\n \"id\": str(playlist_4.consumer_site.id),\n \"domain\": playlist_4.consumer_site.domain,\n \"name\": playlist_4.consumer_site.name,\n },\n \"created_by\": None,\n \"created_on\": playlist_4.created_on.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n \"duplicated_from\": None,\n \"id\": str(playlist_4.id),\n \"is_portable_to_consumer_site\": False,\n \"is_portable_to_playlist\": True,\n \"is_public\": False,\n \"lti_id\": \"playlist#four\",\n \"organization\": None,\n \"portable_to\": [],\n \"retention_duration\": None,\n \"title\": \"Fourth playlist\",\n \"users\": [str(user.id)],\n \"can_edit\": True,\n },\n ],\n )", "def test_post_foods_list(self):\n pass", "def test_was_published_recently_with_old_todo(self):\n time = timezone.now() - datetime.timedelta(days=1, seconds=1)\n future_todo = TodoItem(pub_date=time)\n self.assertIs(future_todo.was_published_recently(), False)", "def get_todo_list():\n\n # assume that a \"h264\" encoded file is complete\n return models.LibraryItem.objects.filter(h264=False)", "def test_list_past_meeting_files(self):\n pass", "def _add_schedule_items(self):\n\n schedules = [\n {\n 'start_time': '9:30 AM',\n 'end_time': '10:00 AM',\n 'title': 'Daily Scrum',\n 'location': 'Hogwarts',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '10:30 AM',\n 'end_time': '11:00 AM',\n 'title': 'Engineering Interview',\n 'location': 'Narnia',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '12:00 PM',\n 'end_time': '12:30 PM',\n 'title': 'Lunch',\n 'location': 'Kitchen',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n {\n 'start_time': '2:00 PM',\n 'end_time': '2:30 PM',\n 'title': 'Workout',\n 'location': 'Gym',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n },\n ]\n\n recurring_item_data = {\n 'start_time': '3:00 PM',\n 'end_time': '3:30 PM',\n 'title': 'Recurring thing',\n 'location': 'asdf',\n 'day': self.day,\n 'user': self.user.user.rolllistuser,\n }\n\n schedule_items = []\n\n schedule_dict = {i['start_time']: i for i in schedules}\n\n for schedule in schedules:\n save_data = schedule\n save_data['start_time'] = get_relevant_time_id(schedule['start_time'])\n save_data['end_time'] = get_relevant_time_id(schedule['end_time'])\n new_schedule_item = ScheduleItem(**save_data)\n new_schedule_item.save()\n schedule_items.append(new_schedule_item)\n\n save_data = recurring_item_data\n save_data['start_time'] = get_relevant_time_id(recurring_item_data['start_time'])\n save_data['end_time'] = get_relevant_time_id(recurring_item_data['end_time'])\n new_schedule_item = ScheduleItem(**save_data)\n new_schedule_item.save()\n new_schedule_item.make_recurring([0])\n schedule_items.append(new_schedule_item)\n\n return schedule_items, schedule_dict", "def test_list_past_meeting_polls(self):\n pass", "def create_toptenlist(self, user_ref, index):\n 
self.client.force_authenticate(user=getattr(self, user_ref))\n response = self.client.post(create_list_url, toptenlist_data_1, format='json')\n toptenlist_id = json.loads(response.content)['id']\n\n toptenlist_ref = 'toptenlist_' + str(index) # refer to toptenlist by self.toptenlist_1 etc\n\n # this allows us to reference the originial toptenlist from self\n # self.toptenlist_1 etc\n # this is not safe for properties like name, but is safe for getting toptenlist and toptenitem id because these do not change\n setattr(self, toptenlist_ref, TopTenList.objects.get(pk=toptenlist_id))\n\n # the request should succeed\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.client.logout()", "def test_add_followers(self):\n pass", "async def todo(self, ctx):\n\n cursor = await db.execute(\"Select Thing from Todo where MemberID = ?\", (ctx.author.id,))\n result = await cursor.fetchall()\n\n if not result:\n return await send_embed(ctx, \"You do not have anything on your todo list.\", negative=True)\n\n result = [i[0] for i in result]\n\n embeds = []\n description = []\n\n for index, string in enumerate(result, start=1):\n\n description.append(f\"{index}. {string}\")\n\n if index % 10 == 0 or index == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n description=\"\\n\".join(description)\n )\n embed.set_author(name=str(ctx.author), icon_url=str(ctx.author.avatar_url))\n embeds.append(embed)\n description = []\n\n await self.bot.paginate(ctx, embeds)", "def NewItems(self) -> _n_1_t_7:", "def add_jira_entries(config, date, dry_run, economic):\n if date is not None:\n jira = Jira(config.items('Jira'))\n for task in jira.get_tasks():\n if task:\n economic.add_time_entry(task, dry_run)", "def solve_one_day_todo_events(self, todo_items_list):\n seconds_until_tomorrow = self.seconds_left_util_tomorrow()\n if len(todo_items_list) < 1:\n # indicates that today has no todo events, then cruise will sleep until tomorrow\n # time.sleep(seconds_until_tomorrow + 1)\n # seems can not directly sleep to tomorrow, directly continue\n time.sleep(10)\n else:\n # add now time to calculate intervals\n now = datetime.datetime.now().replace(microsecond=0, second=0)\n now_item = {\n 'time': now\n }\n todo_items_list.append(now_item)\n todo_items_list = sorted(todo_items_list, key=lambda x: x['time'])\n todo_still_items = [i for i in todo_items_list if i['time'] >= now]\n if len(todo_still_items) > 1:\n intervals = [(todo_still_items[i]['time'] - todo_still_items[i - 1]['time']).seconds for i in\n range(1, len(todo_still_items))]\n print('[TODO CRUISER] still todo intervals: ', intervals)\n for i, interval in enumerate(intervals):\n print('[TODO CRUISER] start solve event {}, interval {}'.format(i, interval))\n\n # I am changing time.sleep(interval) into following one, which make sense more\n # so that sleep will interrupt when local_file changed,\n for t in range(interval):\n if self.changes_event_handler.CHANGE_FLAG:\n # if detected changes, then return and reset flag to False\n self.changes_event_handler.CHANGE_FLAG = False\n print('[IMPORTANT] changes detected, solve today return.')\n return\n else:\n time.sleep(1)\n todo_item = todo_still_items[i + 1]\n try:\n print(' ... 
try to execute mission.')\n class_name = todo_item['class']\n func = todo_item['func']\n args = todo_item['args']\n\n c_obj = globals()[class_name](self.msg_executor)\n func = getattr(c_obj, func)\n func(*args)\n print('[CHEER] time mission executed!!!!!!')\n except KeyError:\n pass\n # sleep more 1 minute\n time.sleep(61)\n else:\n pass\n time.sleep(50)\n # time.sleep(seconds_until_tomorrow + 1)", "def test_legacy_items_at_day_1(manager):\n manager.update()\n compare_results_attrs(manager.items, fixtures.FIXTURES[1])", "def reminders_soon(request):\n now = timezone.now()\n soon = now + timedelta(days=2)\n return Task.objects.filter(\n user=request.user, reminder__lt=soon, reminder_seen=False, done=False).exclude(folder='trash')", "def post_add_todo_request(self):\n response = requests.post(\n url=self.url,\n headers=self.default_headers,\n json=self.habitica_todo.to_json_dict()\n )\n return get_data_or_exit(response)", "def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')", "def todos(self):\r\n return Todos(self)", "def create_list(self, name) -> TodoList:\n t = TodoList(name, [])\n if name in self.lists:\n raise HTTPException(409, f\"TODO list with name {name} already exists\")\n self.lists[self.__to_key(name)] = t\n return t", "def test_sys_favourites_and_dates(self):\n\n with ts.SetupDbAndCredentials() as s:\n args = [\"--favourites-only\", \"--max-retries\", \"6\", \"--max-threads\", \"2\"]\n s.test_setup(\n \"test_sys_favourites\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n db = LocalData(s.root)\n\n # Total of 1 out of media items\n db.cur.execute(\"SELECT COUNT() FROM SyncFiles\")\n count = db.cur.fetchone()\n self.assertEqual(1, count[0])\n\n name = s.root / \"photos/2017/09/IMG_2117.JPG\"\n date = datetime.fromtimestamp(os.path.getmtime(str(name)))\n expected = datetime(2017, 9, 26, 15, 29, 44)\n self.assertEqual(\n expected, date.replace(microsecond=0), \"Modify date not set correctly\"\n )\n # TODO: this fails on Github Actions - probably its the filesystem\n # rather than the OS\n #\n # if os.name == \"nt\":\n # date = datetime.fromtimestamp(os.path.getctime(name))\n # expected = datetime(2017, 9, 26, 15, 29, 44)\n # self.assertEqual(\n # expected,\n # date.replace(microsecond=0),\n # \"Create date not set correctly\",\n # )", "def interactive(todofile):\n tmpfile = tempfile.NamedTemporaryFile(suffix='.txt', prefix='todo-',\n delete=False)\n print >> tmpfile\n print >> tmpfile , '# Todo items should be formed as <date> -- <todo>'\n print >> tmpfile , '# The date field is optional.'\n print >> tmpfile , '# Lines starting with # are ignored.'\n tmpfile.close()\n subprocess.call(['sensible-editor', tmpfile.name])\n with open(tmpfile.name) as writtenfile:\n add_items(todofile, writtenfile.readlines())\n os.remove(tmpfile.name)", "def test_can_start_a_list_and_retrieve_it_later(self):\n self.browser.get('http://localhost:8000')\n\n # Ela notou que o título e o cabeçalho da página diz Listas de Tarefas\n self.assertIn('Listas de Tarefas', self.browser.title)\n self.fail('Finish the test!')\n\n # Ela é imediatamente convidada a colocar uma tarefa na lista\n\n # Ela digita \"Comprar coxinhas\" em uma caixa de texto (Edith é uma\n # PyLady)\n\n # Quando ela tecla Enter, a página atualiza, e agora a página lista\n # \"1: Comprar coxinha\" como um item em uma lista de tarefas\n\n # Ainda tem uma caixa de texto convidando-a a adicionar um outro item.\n # Ela 
digita \"Comer as coxinhas\" (Edith é muito metódica)\n\n # A página atualiza novamente, e agora mostra ambos os itens na lista\n # dela\n\n # Edith se pergunta se o site lembrará da sua lista. Então ela vê que o\n # site gerou uma URL única para ela - Tem um texto explicativo para\n # esse efeito.\n\n # Ela visita a URL - Sua lista de tarefas ainda está lá.", "def add_element_function(listt):\n\n\tservice_name = str(input(\"Enter web site/app name: \"))\n\tuser_name = str(input(\"Enter user name: \"))\n\tuser_password = str(input(\"Enter user password: \"))\n\n\tnew_item = {\n\t\t\"service\": service_name, \n\t\t\"user\": user_name,\n\t\t\"password\": user_password\n\t\t}\n\n\tlistt.append(new_item)\n\tprint(\"Status: Element added\")\n\tprint(f\"Current elements: {len(listt)}\")\n\tprint()\n\treturn listt", "def add_list(request) -> HttpResponse:\n\n # Only staffers can add lists, regardless of TODO_STAFF_USER setting.\n if not request.user.is_staff:\n raise PermissionDenied\n\n if request.POST:\n form = AddTaskListForm(request.user, request.POST)\n if form.is_valid():\n try:\n newlist = form.save(commit=False)\n newlist.slug = slugify(newlist.name, allow_unicode=True)\n newlist.save()\n messages.success(request, \"A new list has been added.\")\n return redirect(\"todo:lists\")\n\n except IntegrityError:\n messages.warning(\n request,\n \"There was a problem saving the new list. \"\n \"Most likely a list with the same name in the same group already exists.\",\n )\n else:\n if request.user.groups.all().count() == 1:\n # FIXME: Assuming first of user's groups here; better to prompt for group\n form = AddTaskListForm(request.user, initial={\"group\": request.user.groups.all()[0]})\n else:\n form = AddTaskListForm(request.user)\n\n context = {\"form\": form}\n\n return render(request, \"todo/add_list.html\", context)", "def test_list_user(self):\n pass", "def add_task(self, task):\n\n # The pyrtm module is dynamic.\n # pylint: disable=no-member\n\n added = self.rtm.tasks.add(timeline=self.timeline,\n name=task['name'], list_id=self.list_id, parse=0)\n\n # TODO: record undoable transactions and undo them upon kb interrupt\n #if added.transaction.undoable == \"1\":\n #self.transactions.append(added.transaction.id)\n\n args = dict(\n timeline = self.timeline,\n list_id = self.list_id,\n taskseries_id = added.list.taskseries.id,\n task_id = added.list.taskseries.task.id,\n )\n\n if task.get('tags', None):\n # Should this be setTags?\n self.rtm.tasks.addTags(tags=','.join(task['tags']), **args)\n\n if task.get('due_date', None):\n self.rtm.tasks.setDueDate(due=task['due_date'],\n # TODO: Can we determine has_due_time?\n has_due_time=1,\n # We're using iso8601 so we don't need them to be specially parsed.\n parse=0,\n **args)\n\n if task.get('estimated', None):\n self.rtm.tasks.setEstimate(estimate=task['estimated'], **args)\n\n if task.get('priority', None):\n self.rtm.tasks.setPriority(priority=task['priority'], **args)\n\n if task.get('repeat', None):\n self.rtm.tasks.setRecurrence(repeat=task['repeat'], **args)\n\n if task.get('notes', None):\n if isinstance(task['notes'], list):\n notes = task['notes']\n else:\n notes = [ task['notes'] ]\n for note in notes:\n self.rtm.tasks.notes.add(note_title=note, note_text=note, **args)\n\n if task.get('url', None):\n self.rtm.tasks.setURL(url=task['url'], **args)\n\n # do the status changes last\n if task.get('completed', None):\n self.rtm.tasks.complete(**args)\n\n if task.get('deleted', None):\n self.rtm.tasks.delete(**args)\n\n return added", "def 
do_list(self, arg):\n try:\n cprint (\"Here are your todo lists: \\n\", 'blue')\n app.ToDoApp.to_view_todo()\n\n except ValueError as e:\n cprint(e, 'red')", "def add_list(\n cls,\n name,\n user_id\n ):\n lists_collection = taskify['lists']\n new_list = {\n \"name\": name,\n \"user_id\": ObjectId(user_id)\n }\n list_id = lists_collection.insert_one(new_list).inserted_id\n return new_list, list_id", "async def test_todo_feed_can_paginate_correctly_two(\n self,\n *,\n app: FastAPI,\n authorized_client: AsyncClient,\n test_list_of_new_and_updated_todos: List[TodoInDB],\n ) -> None:\n starting_date = datetime.datetime.now() + datetime.timedelta(minutes=10)\n combos = []\n for chunk_size in [25, 15, 10]:\n res = await authorized_client.get(\n app.url_path_for(\"feed:get-todo-feed-for-user\"),\n params={\"starting_date\": starting_date, \"page_chunk_size\": chunk_size},\n )\n assert res.status_code == status.HTTP_200_OK\n page_json = res.json()\n assert len(page_json) == chunk_size\n id_and_event_combo = set(f\"{item['id']}-{item['event_type']}\" for item in page_json)\n combos.append(id_and_event_combo)\n starting_date = page_json[-1][\"event_timestamp\"]\n # ensure all non of the items in any response exist in any other response.\n length_of_all_id_combos = sum(len(combo) for combo in combos)\n assert len(set().union(*combos)) == length_of_all_id_combos", "def ShowCurrentItemsInList(list_of_rows):\r\n print(\"******* The current items ToDo are: *******\")\r\n for row in list_of_rows:\r\n print(row.product_name + \" (\" + str(row.product_price) + \")\")\r\n print(\"*******************************************\")\r\n print() # Add an extra line for looks\r", "def menu_loop(todo_list, save_file_location):\r\n show_hidden = False\r\n selection = 0\r\n invalid_input = False\r\n while selection != 6:\r\n if invalid_input:\r\n invalid_input = False\r\n else:\r\n print_list(save_file_location, todo_list, True, show_hidden)\r\n divider(137 + 17) # Length of prompt statement below\r\n list_status = check_list_status(todo_list)\r\n if list_status == 0: # No Issues\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 2 for \"\r\n \"Remove Item, 3 for Edit Item, \"\r\n \"4 for Mark Item Complete, \"\r\n \"5 for Toggle Hidden, and 6 for \"\r\n \"Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, and 6 \"\r\n \"for Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n else: # Entirely Hidden List\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 5 for \"\r\n \"Toggle Hidden, and 6 for Exit, \"\r\n \"7 for Concept Demonstration\\n\"))\r\n # Uses the clean_input function above to get a number from the\r\n # user, converting it to an int so a decimal won't return an\r\n # invalid input in the following steps\r\n print(\"\") # Blank Print statement to add an extra blank line after\r\n # user input before displaying response\r\n if selection == 1: # Add Item - modify the list variable, then save\r\n # to file\r\n add_item(todo_list)\r\n elif selection == 2: # Remove Item - modify the list variable, then\r\n # save to file\r\n if list_status == 0:\r\n remove_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to remove\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to remove\")\r\n elif selection == 3: # Edit Item - modify the list variable, then save\r\n # to file\r\n 
if list_status == 0:\r\n edit_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to edit\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to edit\")\r\n elif selection == 4: # Mark Item Complete - modify the list variable,\r\n # then save to file\r\n if list_status == 0:\r\n mark_complete(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to mark complete\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to mark \"\r\n \"complete\")\r\n elif selection == 5: # Show Hidden - modify the list variable, then\r\n # save to file\r\n if list_status == 0 or list_status == 2:\r\n if show_hidden:\r\n print(\"No longer showing hidden items\")\r\n show_hidden = False\r\n else:\r\n print(\"Now showing hidden items\")\r\n show_hidden = True\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to show or \"\r\n \"hide\")\r\n elif selection == 6: # Exit Program\r\n print(\"Now Closing\")\r\n elif selection == 7: # Extra section to demonstrate proficiency with\r\n # topics covered in class - Sprint 1\r\n concept_demonstration()\r\n else:\r\n invalid_input = True\r\n print(\"Invalid Input\\nPlease Try Again\")", "def get_next_item(todo_list):\n next_item = {}\n\n try:\n if len(todo_list) > 0:\n next_item = todo_list[0]\n except next_item:\n print(\"next_item could not be created\")\n\n return next_item", "def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n if state == 0:\n if line == '__IN_PROGRESS__':\n state = 1\n elif len(line) > 1:\n todo.append(line)\n elif state == 1:\n if line == '__DONE__':\n state = 2\n elif len(line) > 1:\n in_progress.append(line)\n elif state == 2:\n if len(line) > 1:\n done.append(line)\n line = todo_fp.readline()\n todo_fp.close()\n self.todo_scroll_cell.add_item_list(todo)\n self.in_progress_scroll_cell.add_item_list(in_progress)\n self.done_scroll_cell.add_item_list(done)", "def show_outstanding(todo_list):\n outstanding_todo_list = []\n\n try:\n for item in todo_list:\n if not item['completed']:\n outstanding_todo_list.append(item)\n except outstanding_todo_list:\n print(\"Issue with outstanding list\")\n except todo_list:\n print(\"Issue with todo_list in show_outstanding\")\n\n return outstanding_todo_list", "async def remindlist(self, ctx):\r\n id = str(ctx.author.id)\r\n if id not in self.bot.data.save['reminders'] or len(self.bot.data.save['reminders'][id]) == 0:\r\n await ctx.reply(embed=self.bot.util.embed(title=\"Reminder Error\", description=\"You don't have any reminders\", color=self.color))\r\n else:\r\n embed = discord.Embed(title=\"{}'s Reminder List\".format(ctx.author.display_name), color=self.color)\r\n embed.set_thumbnail(url=ctx.author.avatar_url)\r\n for i in range(0, len(self.bot.data.save['reminders'][id])):\r\n embed.add_field(name=\"#{} ▫️ {:%Y/%m/%d %H:%M} JST\".format(i, self.bot.data.save['reminders'][id][i][0]), value=\"[{}](https://discordapp.com/channels/{})\".format(self.bot.data.save['reminders'][id][i][1], self.bot.data.save['reminders'][id][i][2]), inline=False)\r\n await ctx.reply(embed=embed)", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n 
assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def test_was_published_recently_with_future_todo(self):\n time = timezone.now() + datetime.timedelta(days=30)\n future_todo = TodoItem(pub_date=time)\n self.assertIs(future_todo.was_published_recently(), False)", "def add_list(action, user):\n \n userprofile = user.get_profile()\n \n board = userprofile.get_board(action['boardId'])\n \n # Create the list\n l = List()\n l.title = action['what']['title']\n l.color = action['what']['color']\n l.creator = user\n l.save()\n \n # Add the list to the user's lists\n \n board.lists.append(l.id)\n userprofile.save()\n \n return l;", "def todolists(self):\r\n return tdl.GlobalTodolists(self)", "def create_item(self, user: User, **kwargs) -> None:", "def save_todo_file(self):\n\n if os.path.exists('TODO.txt'):\n os.remove('TODO.txt')\n todo_fp = open('TODO.txt', 'w')\n todo_items = self.todo_scroll_cell.get_item_list()\n in_progress_items = self.in_progress_scroll_cell.get_item_list()\n done_items = self.done_scroll_cell.get_item_list()\n for item in todo_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__IN_PROGRESS__' + '\\n')\n for item in in_progress_items:\n todo_fp.write(item + '\\n')\n todo_fp.write('__DONE__' + '\\n')\n for item in done_items:\n todo_fp.write(item + '\\n')\n todo_fp.close()\n self.master.show_message_popup('Saved', 'Your TODO list has been saved!')", "def todolist(self, todolist_id):\r\n return tdl.Todolist(self, todolist_id)", "def testNotToday(self):\n api.addReminders('patient2', datetime.datetime.now())\n\n self.assertEqual(testDatabase.Reminder.select().where(testDatabase.Reminder.username == 'patient2').count(),0)", "def todo_list_view(request):\n\n context = {}\n queryset = Todo.objects.filter(user=request.user)\n context['lists'] = queryset\n return render(request,'todos/index.html', context)" ]
[ "0.7035588", "0.63662964", "0.62633353", "0.5987224", "0.59500104", "0.5935984", "0.59221303", "0.5910842", "0.5875174", "0.5867453", "0.5842675", "0.5828082", "0.5801755", "0.5759313", "0.5746675", "0.5741539", "0.5722921", "0.5661359", "0.56570214", "0.5634428", "0.5626967", "0.5617959", "0.5577509", "0.556225", "0.55153346", "0.5497402", "0.5496168", "0.54508454", "0.5431274", "0.5420483", "0.5409948", "0.5402746", "0.53548074", "0.535331", "0.53523827", "0.5351072", "0.5347329", "0.533829", "0.53269196", "0.53234863", "0.5301676", "0.5297421", "0.5278597", "0.52749825", "0.52603114", "0.52468836", "0.52447826", "0.52264804", "0.5218741", "0.5207555", "0.51888096", "0.51826155", "0.5178743", "0.5177551", "0.5177006", "0.5171515", "0.51701045", "0.51505774", "0.5148564", "0.51441586", "0.51436764", "0.513748", "0.5127443", "0.51159793", "0.51134217", "0.51113135", "0.51093173", "0.5109232", "0.510264", "0.50963897", "0.50932866", "0.50930566", "0.50923365", "0.5090775", "0.50731367", "0.5065033", "0.50561816", "0.5048078", "0.5045886", "0.5038569", "0.5038396", "0.5027109", "0.50200504", "0.50198966", "0.5017485", "0.50082916", "0.50070244", "0.50047994", "0.50031173", "0.5000515", "0.49993142", "0.49985147", "0.49966738", "0.49923193", "0.4971043", "0.49676576", "0.49641204", "0.4961894", "0.49616835", "0.49547812" ]
0.73899007
0
Helper function adding some known schedule items for the test user
def _add_schedule_items(self):
    schedules = [
        {
            'start_time': '9:30 AM',
            'end_time': '10:00 AM',
            'title': 'Daily Scrum',
            'location': 'Hogwarts',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
        {
            'start_time': '10:30 AM',
            'end_time': '11:00 AM',
            'title': 'Engineering Interview',
            'location': 'Narnia',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
        {
            'start_time': '12:00 PM',
            'end_time': '12:30 PM',
            'title': 'Lunch',
            'location': 'Kitchen',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
        {
            'start_time': '2:00 PM',
            'end_time': '2:30 PM',
            'title': 'Workout',
            'location': 'Gym',
            'day': self.day,
            'user': self.user.user.rolllistuser,
        },
    ]
    recurring_item_data = {
        'start_time': '3:00 PM',
        'end_time': '3:30 PM',
        'title': 'Recurring thing',
        'location': 'asdf',
        'day': self.day,
        'user': self.user.user.rolllistuser,
    }

    schedule_items = []
    schedule_dict = {i['start_time']: i for i in schedules}

    for schedule in schedules:
        save_data = schedule
        save_data['start_time'] = get_relevant_time_id(schedule['start_time'])
        save_data['end_time'] = get_relevant_time_id(schedule['end_time'])
        new_schedule_item = ScheduleItem(**save_data)
        new_schedule_item.save()
        schedule_items.append(new_schedule_item)

    save_data = recurring_item_data
    save_data['start_time'] = get_relevant_time_id(recurring_item_data['start_time'])
    save_data['end_time'] = get_relevant_time_id(recurring_item_data['end_time'])
    new_schedule_item = ScheduleItem(**save_data)
    new_schedule_item.save()
    new_schedule_item.make_recurring([0])
    schedule_items.append(new_schedule_item)

    return schedule_items, schedule_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_recurring_schedule(self):\n pass", "def test_list_schedules(self):\n pass", "def _create_schedules(self):\n\n ''''''", "def add_schedule(doc_user, date, schedule, logger):\n #my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n if my_calendar == None:\n logger.info('{}: calendar start'.format(doc_user[\"user_id\"]))\n my_calendar = {\"User\": doc_user[\"_id\"],\n \"schedules\": []}\n col_calendar.insert_one(my_calendar)\n\n if not schedule:\n return False\n\n if len(schedule) > 5:\n logger.info('{}: day schedules are already full'.format(\n doc_user[\"user_id\"]))\n return False\n\n ret = 0\n for s in schedule:\n my_calendar[\"schedules\"] += [{\"date\": date,\n \"events_list\": [s]}]\n logger.info('{}: {} added into schedule'.format(\n date, s))\n ret += 1\n\n if ret >= 1:\n col_calendar.find_one_and_replace({\"User\": doc_user[\"_id\"]}, my_calendar)\n\n return True", "def add_schedule(self):\r\n\r\n # Take the schedule entires from TOML file\r\n entries = self.cfg.get('payload',{}).get('schedule')\r\n # Check for valid entires\r\n if entries:\r\n # Construct payload \r\n for payload in entries:\r\n # Parse schedule payload\r\n ready = self.construct_payload(parse = copy.deepcopy(payload), dele = 'link')\r\n # Check the entry vs a json schema\r\n check.check_entry(path='schemas/schedule.json', test=ready)\r\n # Post request\r\n b1 = self.add_post(ready, API.url_schl, self.schedules)\r\n if 'link' in payload.keys() and payload['link'] != [{}]:\r\n b2 = self.link(self.schedules[-1].get('id'))\r\n else:\r\n return b1\r\n if b2 != None:\r\n return b1 and b2\r\n else:\r\n return False", "def _add_games_to_schedule(self, schedule, game_type, year):\n for item in schedule:\n game = Game(item, game_type, year)\n self._games.append(game)", "def test_add_schedule(self):\n body = Schedule()\n response = self.client.open('/v1/schedule',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def add(self, story, items_to_schedule):\n url = self._build_url(story)\n arguments = self._argument_converter(\n data={\n 'items': items_to_schedule\n }\n )\n\n result = self._client.post(url, **arguments)\n return result", "def create( self ):\r\n for rsrc in self.ee.getRsrcs( ):\r\n self.schedule[rsrc.getid( )] = [ ]", "def test_remove_recurring_schedule(self):\n pass", "def add_scheduled_spirit(self, schedule_info):\n\n raise NotImplementedError", "def create_schedule_team(self, schedule):\r\n stub_user = self.find(\"users\", \"Stub User\", attribute=\"name\")\r\n schedule_json = {\r\n \"name\": schedule['name'],\r\n \"type\": \"schedule\",\r\n \"time_zone\": \"Pacific/Auckland\",\r\n \"schedule_layers\": [\r\n {\r\n \"start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_virtual_start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_turn_length_seconds\": 86400,\r\n \"users\": [\r\n {\r\n \"user\": {\r\n \"type\": \"user\",\r\n \"id\": stub_user[\"id\"]\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n try:\r\n self.rpost(\"users\", json=schedule_json)\r\n except PDClientError as e:\r\n raise e", "def _use_default_schedule(self):\n def gen_day():\n dl = []\n ll = [-1, '', -1, '', '']\n for i in range(8):\n dl.append(ll[:])\n rl = []\n for i in range(4):\n rl.append(dl[:])\n return rl\n\n self.schedule = {\n 'current_week': [1, date.today().isocalendar()[1]],\n 'lessons_time': [\n ['8:00', '9:35'],\n ['9:45', 
'11:20'],\n ['11:40', '13:15'],\n ['13:25', '15:00'],\n ['15:20', '16:55'],\n ['17:05', '18:40'],\n ['18:45', '20:20'],\n ['20:25', '22:00']\n ],\n 'schedule': {\n 'Monday': gen_day(),\n 'Tuesday': gen_day(),\n 'Wednesday': gen_day(),\n 'Thursday': gen_day(),\n 'Friday': gen_day(),\n 'Saturday': gen_day()\n },\n 'subgroup': 0\n }", "def add_to_calender(service, username): \n colors = service.colors().get().execute()\n d_and_t = df.get_add_to_calender_input(argv[1], argv[2])\n now = datetime.datetime.now()\n if d_and_t == None:\n return\n event_request_body = {\n 'start': {\n 'dateTime': df.convert_to_RFC_datetime(d_and_t[0], d_and_t[1], d_and_t[2], d_and_t[3][0]-2, d_and_t[3][1]),\n 'timeZone': 'Africa/Johannesburg'\n },\n 'end': {\n 'dateTime': df.convert_to_RFC_datetime(d_and_t[0], d_and_t[1], d_and_t[2], d_and_t[4][0]-2, d_and_t[4][1]),\n 'timeZone': 'Africa/Johannesburg'\n },\n 'summary': f\"{username} - Code Clinic\",\n 'description': 'empty',\n 'status': 'confirmed',\n 'transparency': 'opaque',\n 'visibility': 'public',\n 'location': 'WTC',\n 'guestsCanModify': True,\n 'attendees': [\n { \n 'displayName': username,\n 'organizer': True,\n 'email': f'{username}@student.wethinkcode.co.za',\n 'optional': True,\n 'responseStatus': 'accepted'\n }\n ]\n }\n start = event_request_body['start']['dateTime']\n end = event_request_body['end']['dateTime']\n\n overlaps = check_if_slots_overlap(start, end, service, username)\n if overlaps == False:\n response = service.events().insert(calendarId=get_events.calendar_id, sendUpdates='all', body=event_request_body).execute()\n print(\"\\nYour slot has been created...\")\n else:\n print(\"\\nYou already have an event scheduled for this time. Please choose another time...\")\n events, count = get_events.get_events_for_next_7_days_to_delete(username, service)\n if count == 0:\n print(\"\\nYou currently don't have any slots created.\")\n return", "def append_schedule(self, host, schedule):\n if isinstance(host, str):\n host = resolve_host_name(host)\n\n host_schedule = self.host_schedule(host['name'])\n\n for name, run_policy, command in schedule:\n e = Event(name, run_policy, command)\n host_schedule.append(e)", "def register_schedule(self, term, schedule, allow_waitlisting=True, at=None):\n items = self.schedules(term, include_units=True)[schedule]\n self.register_courses(term, schedule, items, allow_waitlisting, at)", "def test_add(self):\n sched = Schedule()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)), inplace=True)\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"u1\", 1, sched)\n inst_map.add(\"u1\", 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(\"u1\"), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))\n\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", (), sched)\n with self.assertRaises(PulseError):\n inst_map.add(\"u1\", 1, \"not a schedule\")", "def pre_schedule(self):\n return []", "def post_schedule(self):\n return []", "def add_schedule(self, schedule: Schedule, span: FiniteSpan) -> None:\n for schedule_item in schedule._schedule_items:\n for event in schedule_item.events(span):\n self.add_event(event)", "def test_pre_fill_and_assign(self):\n users = []\n for i in range(1, 50):\n users.append(User.objects.create_user(username=\"u{0}\".format(i)))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n for user, manager in zip(users, 
Manager.objects.all()):\n manager.incumbent = UserProfile.objects.get(user=user)\n manager.save()\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)", "def test_meeting_registrants(self):\n pass", "def invite_site_users(users):\n #group(run_cron.s(item) for item in sites).delay()\n pass", "def get_time_slots_map_for_user(start_time, end_time, user):\n\n time_slots_map = TimeSlotsMap(start_time=start_time, end_time=end_time)\n\n user_query = Query()\n schedules = db.search(user_query.user == user)\n\n schedules.sort(key=lambda x:x['created_time'])\n\n for sc in schedules:\n day_sc_map = TimeHourSlotsInOneDay(initial_value=str(sc[\"available_hours_in_a_day\"]))\n\n sc_start_time = parse_datetime_str(sc.get(\"start_time\"))\n sc_end_time = parse_datetime_str(sc.get(\"end_time\"))\n\n time_slots_map.overlay_slots_map(time_slots_map=day_sc_map,start_time=sc_start_time,\n end_time=sc_end_time, repeat=True)\n\n return time_slots_map", "def set_schedule(self, new_schedule):\n #first, set all the others to inactive\n\n new_schedule.deprecated=False\n if new_schedule.started == None or new_schedule.started <= datetime.utcnow():\n new_schedule.started=datetime.utcnow()\n for sched in self.weekly_schedule:\n if not sched.deprecated:\n #sched.deprecated=True\n sched.ended=datetime.utcnow()\n sched.save()\n elif new_schedule.started > datetime.utcnow():\n #if it's in the future, then don't deprecate the future schedule, just procede along and let the system set the dates correctly\n pass\n self.weekly_schedule.append(new_schedule)\n self.save()", "def update_schedule_users(self, schedule, team_members):\r\n schedule_json = {\r\n \"name\": schedule[\"summary\"],\r\n \"type\": \"schedule\",\r\n \"time_zone\": schedule[\"time_zone\"],\r\n \"description\": schedule[\"description\"],\r\n \"schedule_layers\": [\r\n {\r\n \"start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_virtual_start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_turn_length_seconds\": 86400,\r\n \"users\": []\r\n }\r\n ]\r\n }\r\n\r\n for member in team_members:\r\n pagerduty_user = self.find(\"users\", member[\"name\"], attribute=\"name\")\r\n if pagerduty_user is not None:\r\n schedule_json[\"schedule_layers\"][0][\"users\"].append({\r\n \"user\": {\r\n \"type\": \"user\",\r\n \"id\": pagerduty_user[\"id\"]\r\n }\r\n })\r\n\r\n try:\r\n self.rput(\"schedules/\" + schedule[\"id\"], json=schedule_json)\r\n except PDClientError as e:\r\n raise e", "def add_schedule(self, schedule_dict):\n sub_task = SchedulePolicies.schedule_json(self.policy_type, schedule_dict)\n sub_task[\"subTaskOperation\"] = 2\n self._subtasks.append(sub_task)\n self._modify_schedule_policy_properties()", "def add_jira_entries(config, date, dry_run, economic):\n if date is not None:\n jira = Jira(config.items('Jira'))\n for task in jira.get_tasks():\n if task:\n economic.add_time_entry(task, dry_run)", "def register(self, task, schedule, minutes: int = None):\n self.task_list.append(ScheduledTask(task, schedule, minutes))", "def append_schedule(*args, **kwargs):\n return get_schedule().append_schedule(*args, **kwargs)", "def import_schedule(user, classes, start_date):\n\n for cls in classes:\n create_event(user, cls, start_date)", "def test_check_user_cals(self):\n instmap = FakeOpenPulse2Q().defaults().instruction_schedule_map\n\n test_u1 = Schedule()\n test_u1 += ShiftPhase(Parameter(\"P0\"), DriveChannel(0))\n\n instmap.add(\"u1\", (0,), test_u1, arguments=[\"P0\"])\n publisher = instmap.get(\"u1\", (0,), 
P0=0).metadata[\"publisher\"]\n\n self.assertEqual(publisher, CalibrationPublisher.QISKIT)", "def test_get_monitoring_schedules_vendor_v3(self):\n pass", "def test_75_task_settings_scheduler(self, mock):\r\n # Creat root user\r\n self.register()\r\n self.signout()\r\n # Create owner\r\n self.register(fullname=\"owner\", name=\"owner\")\r\n self.new_application()\r\n url = \"/app/sampleapp/tasks/scheduler\"\r\n form_id = 'task_scheduler'\r\n self.signout()\r\n\r\n # As owner and root\r\n for i in range(0, 1):\r\n if i == 0:\r\n # As owner\r\n self.signin(email=\"owner@example.com\")\r\n sched = 'random'\r\n else:\r\n sched = 'default'\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"There should be a %s section\" % form_id\r\n assert dom.find(id=form_id) is not None, err_msg\r\n res = self.task_settings_scheduler(short_name=\"sampleapp\",\r\n sched=sched)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Task Scheduler should be updated\"\r\n assert dom.find(id='msg_success') is not None, err_msg\r\n app = db.session.query(App).get(1)\r\n assert app.info['sched'] == sched, err_msg\r\n self.signout()\r\n\r\n # As an authenticated user\r\n self.register(fullname=\"juan\", name=\"juan\")\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"User should not be allowed to access this page\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # As an anonymous user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"User should be redirected to sign in\"\r\n assert dom.find(id=\"signin\") is not None, err_msg\r\n\r\n # With hidden app\r\n app.hidden = 1\r\n db.session.add(app)\r\n db.session.commit()\r\n self.register(fullname=\"daniel\", name=\"daniel\")\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n self.signout()\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n # Correct values\r\n err_msg = \"There should be a %s section\" % form_id\r\n assert dom.find(id=form_id) is not None, err_msg", "def create_additional_tasks(testcase):\n # No need to create progression task. 
It is automatically created by the cron\n # handler.\n task_creation.create_impact_task_if_needed(testcase)\n task_creation.create_regression_task_if_needed(testcase)\n task_creation.create_symbolize_task_if_needed(testcase)\n task_creation.create_variant_tasks_if_needed(testcase)", "def test_can_add_todo_list():\n scheduler = Scheduler()\n new_id = uuid.uuid4()\n\n scheduler.add_todo_list(new_id, \"my todo list\")\n\n Is(scheduler.get_amount_of_todo_lists()).not_none.integer.has_same_truth_of(1)", "def add_task(action, user):\n \n item = Item()\n item.description = action['what'].get('description', '')\n item.id = action['what']['id']\n item.position = action['what']['position']\n \n l = List.objects.get(id=action['listId'])\n verify_permission(l, user)\n \n l.items.append(item)\n l.save()\n \n return l", "def test_put_monitoring_schedule_vendor_v3(self):\n pass", "def add_availability_week(date, user):\n diversions = diversion_for_week(date)\n for diversion in diversions:\n unavailable = diversion['Unavailable']\n if user in unavailable:\n unavailable.remove(user)\n available = diversion['Available']\n if user not in available:\n available.append(user)\n resp = table.update_item(\n Key={\"WeekOf\": date, \"Paper\": diversion['Paper']},\n ExpressionAttributeNames={\n \"#available\": \"Available\",\n \"#unavailable\": \"Unavailable\"\n },\n ExpressionAttributeValues={\n \":available\": available,\n \":unavailable\": unavailable\n },\n UpdateExpression=\"SET #available = :available, #unavailable = :unavailable\"\n )", "def _test_pre_fill_and_assign_humor(self):\n for i in range(1, 50):\n User.objects.create_user(username=\"u{0}\".format(i))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n manager_shifts = RegularWorkshift.objects.filter(\n pool=self.p1, workshift_type__auto_assign=False,\n )\n profiles = WorkshiftProfile.objects.all()\n for profile, shift in zip(profiles, manager_shifts):\n shift.current_assignees.add(profile)\n shift.save()\n unfinished = utils.auto_assign_shifts(\n self.semester, pool=WorkshiftPool.objects.get(title=\"Humor Shift\")\n )\n self.assertEqual([], unfinished)", "def find_schedules(user_id: int):\n result = {}\n schedule_plans = sdk.all_scheduled_plans(user_id=user_id)\n for i in schedule_plans:\n result[i['name']] = i['id']\n return result", "def test_admin_calendar_user_admin_add(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def testMainScheduler(self):\n # ARRANGE\n\n numGuardsToAllocate = 3\n guardsAllocated = []\n \n entries = []\n entries.append(GuardEntry(\"Mike\", 0, 12))\n entries.append(GuardEntry(\"Ray\", 3, 9))\n entries.append(GuardEntry(\"Dave\", 4, 8))\n\n # 12 slots 8pm to 2am\n numTimeSlots = 12\n \n # ACT\n\n # Setup the schedule\n (schedule, guardsAllocated) = createSchedule(entries, numTimeSlots)\n timeSlots = schedule.getSchedule()\n \n # ASSERT\n\n # Print details of the schedule\n timeSlotIdx = 0\n print(\"Time Slot,Guard ID\")\n for slot in timeSlots:\n print(str(timeSlotIdx) + \",\" + str(slot.guardID))\n timeSlotIdx += 1\n self.assertTrue(len(guardsAllocated) == 3)", "def add(self, schedule):\n try:\n if schedule in self.set:\n self.log.error(\"%s has already been added to this Scheduler.\" %\n schedule)\n return\n self.log.debug('Adding %s to timer for %s.' 
%\n (schedule, schedule.next))\n self.timer.add_task(schedule.next, self._enqueue, [schedule])\n self.set.add(schedule)\n except:\n self.log.error(\n \"Invalid schedule %s found, deleting.\" % schedule)\n schedule.soft_delete()", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def test_add(self):\n # Everything added will be deleted later in test_delete.\n first_name = 'Trevor'\n last_name = 'Harvey'\n entry_date = '04/19/2012'\n title = 'Test'\n minutes = 34\n notes = 'testing entries. and regex (555) 555-3425'\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)\n # second test add\n first_name = 'Nik'\n last_name = 'Silver'\n entry_date = '01/14/1827'\n title = 'random@mail.com'\n minutes = 34\n notes = 'This is an email test.'\n\n self.data.add(first_name, last_name, entry_date, title, minutes, notes)", "def test_manage_report_schedule_enums(\n self, api_instance: Reports, report_type, schedule\n ):\n params = api_instance.manage_report_schedule(\n report_type=report_type,\n schedule=schedule,\n )\n self.assert_common_params(params, action=\"ManageReportSchedule\")\n assert params[\"ReportType\"] == \"_GET_STRANDED_INVENTORY_UI_DATA_\"\n assert params[\"Schedule\"] == \"_30_MINUTES_\"", "def _test_auto_assign_fifty(self):\n shifts = []\n for i in range(50):\n shifts.append(\n RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=5,\n )\n )\n for i in range(1, 50):\n User.objects.create_user(username=\"u{0}\".format(i))\n\n utils.make_workshift_pool_hours(semester=self.semester)\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)\n for shift in shifts:\n self.assertEqual(1, shift.current_assignees.count())", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def new_schedule():\n sched = OrderedDict()\n for year, stype, week in year_phase_week():\n update_week(sched, year, stype, week)\n return sched", "def AddScheduleTime(parser):\n parser.add_argument(\n '--schedule-time',\n type=arg_parsers.Datetime.Parse,\n help=(\n 'When specifying SPECIFIC_TIME, the date and time at which to '\n 'schedule the maintenance in ISO 8601 format.'\n ),\n )", "def add_user_subject(self, user, date_time):\n self._update_user_activity(self.num_subjects_per_user, user, date_time)", "def 
set_schedule(self, day, week, schedule):\n self.schedule['schedule'][day][week] = schedule", "def test_cron_workflow_service_list_cron_workflows2(self):\n pass", "def test_add_block(self):\n sched = ScheduleBlock()\n sched.append(Play(Waveform(np.ones(5)), DriveChannel(0)), inplace=True)\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"u1\", 1, sched)\n inst_map.add(\"u1\", 0, sched)\n\n self.assertIn(\"u1\", inst_map.instructions)\n self.assertEqual(inst_map.qubits_with_instruction(\"u1\"), [0, 1])\n self.assertTrue(\"u1\" in inst_map.qubit_instructions(0))", "def setSchedule(self, schedule):\r\n if isinstance(schedule, list) == False:\r\n print(\"Schedule needs to be of type list\")\r\n return False\r\n elif len(schedule) != 0 and any(isinstance(entry, dict) == False for entry in schedule):\r\n print(\"Entries in schedule need to be of type dict\")\r\n return False\r\n else:\r\n self.schedule = schedule\r\n return True", "def test_update_instances_schedule_state(self):\n pass", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_retrieve_instances_schedule_state(self):\n pass", "def __init__(self, schedules):\n self.__schedules = copy.deepcopy(schedules)", "def test_set_project_default_power_schedule(self):\n pass", "def create_shows(user, schedule=Schedule()):\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday']\n for episode in schedule.episodes:\n FollowedShows.objects.get_or_create(\n user=user,\n show_id=episode.show.id,\n defaults={\n 'show_name': episode.show.name,\n 'network': episode.show.network['name'],\n 'air_time': episode.airtime or '00:00',\n 'air_days': ','.join(episode.show.schedule['days']).lower(),\n 'summary': episode.summary or 'no summary'\n }\n )", "def _test_auto_assign_one_hundred_and_fifty(self):\n shifts = []\n for i in range(150):\n shifts.append(\n RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=5,\n )\n )\n for i in range(1, 150):\n User.objects.create_user(username=\"u{0}\".format(i))\n\n utils.make_workshift_pool_hours(semester=self.semester)\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)\n for shift in shifts:\n self.assertEqual(1, shift.current_assignees.count())", "def test_issue_add_time(self):\n pass", "def command_add(date, start_time, end_time, title, calendar):\n if is_calendar_date(date) and all([(i in range(0, 24)) for i in (start_time, end_time)]) and start_time <= end_time and is_natural_number(str(start_time)) and is_natural_number(str(end_time)):\n event = {\n \"start\": start_time,\n \"end\": end_time,\n \"title\": title,\n }\n if calendar.get(date) is None:\n calendar[date] = [event]\n else:\n calendar[date].insert(0, event)\n # calendar[date].append(event)\n return True\n return False", "def test_put_monitoring_schedule_manufacturer_v3(self):\n pass", "def uplink_schedule(self):\n self.test.reset_sattest()\n self.test.zero_epoch() #TODO: we won't always do this - use real sat epoch\n self.test.add_schedule(self.cmds_list[:]) # add all of our commands\n\n for cmd in self.cmds_list:\n epoch_to_send = cmd.epoch # for relative, just subtract current sat epoch .. 
that's why we have a var\n #TODO: determine schedule time from now based on relative flag\n\n print(\"COMMAND: \", epoch_to_send, cmd.cmdid)\n Clock.schedule_once(partial(self.test.uplink, cmd.cmdid), int(epoch_to_send))\n Clock.schedule_once(partial(self.test.command_timeout, cmd.cmdid), epoch_to_send + cmd.timeout)", "def get_my_schedules():\r\n schedules = DailyScheduleModel.get_daily_schedules_by_user(g.user.get('id'))\r\n user_schedules = daily_schedule_schema.dump(schedules, many=True)\r\n return custom_response(user_schedules, 200)", "def _setup_volunteer_hours(\n volunteer,\n npf_admin,\n org,\n project,\n datetime_start,\n datetime_end,\n description=\"Manually tracked time \",\n event_type=\"MN\",\n is_verified=False,\n action_type='req'\n):\n event = Event.objects.create(\n project=project,\n is_public=True,\n description=\"finished event\",\n location=\"test_location\",\n coordinator=npf_admin,\n event_type=event_type,\n datetime_start=datetime_start,\n datetime_end=datetime_end\n )\n\n volunteer_timelog = UserTimeLog.objects.create(\n user=volunteer,\n event=event,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n is_verified=is_verified\n )\n\n actiontimelog = AdminActionUserTime.objects.create(\n user=npf_admin,\n usertimelog=volunteer_timelog,\n action_type=action_type\n )\n\n return volunteer_timelog, actiontimelog, event", "def _add_test_items_for_samples(\n self,\n items_map: Dict[int, TestItem],\n tz: Any,\n ) -> None:\n\n for year in range(self.start_year, self.until_year):\n for month in range(1, 13):\n # Add a sample test point on the first of each month\n dt_wall = datetime(year, month, 1, 0, 0, 0)\n dt_local = tz.localize(dt_wall)\n dt_local = tz.normalize(dt_local)\n item = self._create_test_item(dt_local, 'S')\n self._add_test_item(items_map, item)\n\n # Add a sample test point at the end of the year.\n dt_wall = datetime(year, 12, 31, 23, 59, 0)\n dt_local = tz.localize(dt_wall)\n dt_local = tz.normalize(dt_local)\n item = self._create_test_item(dt_local, 'Y')\n self._add_test_item(items_map, item)", "def test_getEventsForItinerary(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n\n rv = self.json_get('/getEventsForItinerary/bbbb', date)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert 'Itinerary for the day not found' in str(rv.data)\n\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert '{\"events\": []}' in str(rv.data)\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n 
password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state", "def test_schedule_with_multiple_parameters_under_same_name(self):\n param1 = Parameter(\"param\")\n param2 = Parameter(\"param\")\n param3 = Parameter(\"param\")\n\n target_sched = Schedule()\n target_sched.insert(0, ShiftPhase(param1, DriveChannel(0)), inplace=True)\n target_sched.insert(10, ShiftPhase(param2, DriveChannel(0)), inplace=True)\n target_sched.insert(20, ShiftPhase(param3, DriveChannel(0)), inplace=True)\n\n inst_map = InstructionScheduleMap()\n inst_map.add(\"target_sched\", (0,), target_sched)\n\n ref_sched = Schedule()\n ref_sched.insert(0, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n ref_sched.insert(10, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n ref_sched.insert(20, ShiftPhase(1.23, DriveChannel(0)), inplace=True)\n\n test_sched = inst_map.get(\"target_sched\", (0,), param=1.23)\n\n for test_inst, ref_inst in zip(test_sched.instructions, ref_sched.instructions):\n self.assertEqual(test_inst[0], ref_inst[0])\n self.assertAlmostEqual(test_inst[1], ref_inst[1])", "def test_list_scheduled_payments_specific_accounts(self):\n pass", "def checkUpstreamScheduler():", "def user_pre_slotted(cc_events, user_name):\n creators = list()\n start_times = list()\n for i in range(len(cc_events)):\n creators.append(cc_events[i]['creator']['email'])\n start_times.append(cc_events[i]['start']['dateTime'])\n\n creator_names = list()\n for i in range(len(creators)):\n name = creators[i]\n name = name.split('@')\n creator_names.append(name[0])\n\n slots = [cc_events[num]['start']['dateTime'] for num, user in enumerate(creator_names) if creator_names[num] == user_name]\n return slots", "def test_cron_workflow_service_list_cron_workflows(self):\n pass", "def create_work_item(self):", "def test_success_edit(event_member):\n _, member, event_id = event_member\n current = date.today() + timedelta(days=1)\n start = datetime.combine(current, time(19, 30))\n end = start + timedelta(hours=2, minutes=30)\n edit(member.username, event_id, True, start, end)\n\n # Check that the user's availability was updated\n schedule = data.events[event_id].availabilities[member.username].times\n days_from_creation = 1\n start_index = 2 * start.hour + start.minute // 30\n end_index = 2 * end.hour + end.minute // 30\n\n for d in range(MAX_DAYS):\n if any(schedule[d]):\n print(d, schedule[d])\n for t in range(INTERVALS):\n if d == days_from_creation and 
start_index <= t < end_index:\n assert schedule[d][t]\n else:\n assert not schedule[d][t]", "def _use_existing_schedule(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/schedule'))\n self.schedule = sh['schedule']\n sh.close()", "def create_appointments(\n data: AppointmentCreate,\n background_tasks: BackgroundTasks, \n user: User = Depends(deps.get_user),\n db: Session = Depends(deps.get_db),\n rdc: RedisCache = Depends(deps.get_redis)\n) -> Any:\n db_provider = crud_user.get_user_by_id(db, str(data.provider_id))\n if not db_provider:\n raise HTTPException(\n status_code=404, \n detail=\"Cabeleireiro não encontrado\"\n )\n\n current_date = datetime.now()\n compare_date = data.date.replace(tzinfo=None)\n if compare_date < current_date:\n raise HTTPException(\n status_code=400, \n detail=\"Você não pode marcar agendamento em datas passadas\"\n )\n \n if data.date.hour < 8 or data.date.hour > 17:\n raise HTTPException(\n status_code=400, \n detail=\"Você só pode cria agendamentos entre 8:00 e 17:00\"\n )\n\n if data.provider_id == user.id:\n raise HTTPException(\n status_code=400, \n detail=\"Você não pode marca agendamento consigo mesmo\"\n )\n\n validate_date = crud_appointment.get_appointment_by_date(db, data.provider_id, data.date)\n if validate_date:\n raise HTTPException(status_code=400, detail=\"Este horario já esta agendado\")\n\n appointment = crud_appointment.create(db, data, user)\n msg = f\"Novo agendamento de {user.name} {user.surname} para o {date.format_date(data.date)}\"\n background_tasks.add_task(crud_notification.create, str(data.provider_id), msg)\n date_time = data.date\n rdc.invalidate_cache(\n f\"providers-appointments:{data.provider_id}:{date_time.year}:{date_time.month}:{date_time.day}\"\n )\n rdc.invalidate_cache(f\"user-appointments:{user.id}\")\n\n return appointment", "def scheduled_tasks(request):\n\n # TODO: RYAN, here call schedule 1 (gameboard cleaner), don't forget to import it above\n # ryan_code.destroy_games()\n # IF ERROR EXAMPLE:\n # return Response({'error': 'Did not work because_!'},\n # status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # TODO: RYAN, here call schedule 2 (scores sorter to assign ranking to each user), don't forget to import it above\n # ryan_code2.set_rankings()\n\n # TODO: RYAN, no code here but need to setup the CI on github to have URL to this API call\n return Response({'Done'})", "def add_calendar_entries(calendar, dry_run, economic, date):\n today = date.isoformat()[:10] + \"T00:00:00Z\"\n tomorrow = (date + datetime.timedelta(days=1)).isoformat()[:10] + \"T00:00:00Z\"\n for event in calendar.get_events(today, tomorrow):\n try:\n entry = economic.convert_calendar_event_to_entry(event)\n if entry:\n economic.add_time_entry(entry, dry_run)\n except UnicodeDecodeError as e:\n print(e)", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 
'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return 
tasks", "async def test_modify_schedule_type(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = 'sleep10'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n manual_schedule = ManualSchedule()\n manual_schedule.schedule_id = interval_schedule.schedule_id\n manual_schedule.name = 'manual'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n\n # Assert: only 1 task is running\n schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n assert isinstance(schedule, ManualSchedule)\n\n await self.stop_scheduler(scheduler)", "def svn_info_t_schedule_set(svn_info_t_self, svn_wc_schedule_t_schedule): # real signature unknown; restored from __doc__\n pass", "def _add_user_to_queue(self, user):\n now = datetime.now(tz=pytz.utc)\n HatQueue.create(user_id=user, start_time=now)\n return HatQueue.select().where(HatQueue.end_time.is_null(True), HatQueue.start_time < now).count()", "def test_get_monitoring_schedules_manufacturer_v3(self):\n pass", "def load_schedule(self, schedule):\n for sched in schedule:\n assert type(sched[\"num_batches\"]) == int\n if sched[\"weights\"] is not None: # schedule specificies specific variables for trainable vars\n assert type(sched[\"weights\"]) == list\n else: # scalar is used\n sched[\"weights\"] = self.get_trainable_variable_names()\n target_len = len(sched[\"weights\"])\n sched[\"weight_lr\"] = self.check_schedule_type(sched[\"weight_lr\"], float, target_len)\n sched[\"decay_steps\"] = self.check_schedule_type(sched[\"decay_steps\"], int, target_len)\n sched[\"decay_rate\"] = self.check_schedule_type(sched[\"decay_rate\"], float, target_len)\n sched[\"staircase\"] = self.check_schedule_type(sched[\"staircase\"], bool, target_len)", "def _add_test_items_for_transitions(\n self,\n items_map: Dict[int, TestItem],\n tz: Any,\n ) -> None:\n\n transitions = self._find_transitions(tz)\n for (left, right, only_dst) in transitions:\n left_item = self._create_test_item(\n left, 'a' if only_dst else 'A')\n self._add_test_item(items_map, left_item)\n\n right_item = self._create_test_item(\n right, 'b' if only_dst else 'B')\n self._add_test_item(items_map, right_item)", "def add(self, item):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n self.stack.push(item)", "def test_calendar_user_view_add(self):\n request = self.factory.get('/module/calendar_user/add/')\n request.user = self.user\n request.session = {}\n response = calendar_user_add(request)\n self.assertEqual(response.status_code, 200)\n\n response = self.client.post('/module/calendar_user/add/', data=\n {\n \"username\": \"caluser1\",\n \"password\": \"caluser1\",\n \"calendar_setting_id\": 1,\n }, follow=True)\n self.assertEqual(response.status_code, 200)\n\n request = self.factory.post('/module/calendar_user/add/',\n {\n \"username\": \"caluser1\",\n \"password\": \"caluser1\",\n \"calendar_setting_id\": 1\n }, follow=True)\n request.user = self.user\n request.session = {}\n response = calendar_user_add(request)\n self.assertEqual(response.status_code, 200)", "def load_timers(bot):\n\n load_single_timers(bot)\n\n all_users = timers_settings.keys('*')\n\n for user in all_users:\n user_id = int(user)\n\n 
current_timer_minutes = int(timers_settings.hget(user_id, 'current_time').decode())\n scheduled_bunch = timers_settings.hget(user_id, 'scheduled_bunch').decode()\n message_id = int(timers_settings.hget(user_id, 'message_id').decode())\n\n if current_timer_minutes == 0 and not scheduled_bunch:\n message = 'Current timers were turned off due to inactivity\\n\\n' \\\n 'Текущие таймеры были отключены в связи с неактивностью'\n\n try:\n bot.edit_message_text(text=message, chat_id=user_id, message_id=message_id)\n timers_settings.delete(user)\n\n except:\n pass\n\n continue\n\n bot_collection[user_id] = TimeManagerBot(user_id, 'EN')\n bot_collection[user_id].load_settings()\n\n bot_collection[user_id].message_id = message_id\n bot_collection[user_id].paused = bool(int(timers_settings.hget(user_id, 'paused').decode()))\n bot_collection[user_id].timers.additional_time = bool(int(timers_settings.hget(user_id, 'additional_time').decode()))\n bot_collection[user_id].how_many_extended = int(timers_settings.hget(user_id, 'how_many_extended').decode())\n bot_collection[user_id].timers.extended = int(timers_settings.hget(user_id, 'extended').decode())\n\n\n if scheduled_bunch:\n bot_collection[user_id].timers.scheduled_bunch = deque([int(i) for i in scheduled_bunch.split('-')])\n\n prev_bunch = timers_settings.hget(user_id, 'prev_bunch').decode()\n\n if prev_bunch:\n bot_collection[user_id].timers.prev_bunch = deque([int(i) for i in prev_bunch.split('-')])\n\n if current_timer_minutes and not bot_collection[user_id].paused:\n last_timer_start = datetime.fromisoformat(timers_settings.hget(user_id, 'last_timer_start').decode())\n bot_collection[user_id].last_timer_start = last_timer_start\n\n last_timer_end = last_timer_start + timedelta(minutes=int(current_timer_minutes))\n current_time = datetime.now()\n\n remain_time = last_timer_end - current_time\n remain_minutes = convert_time(remain_time)\n\n if remain_time.days < 0 or remain_minutes == 0:\n # Change message, ring alarm\n bot_collection[user_id].timers.current_time = current_timer_minutes\n update_timer(bot, user_id, message_id)()\n\n else:\n # Start timer with remain time\n bot_collection[user_id].timers.extended = remain_minutes\n start_timer(bot, user_id, message_id)", "def my_schedule(request,username):\n\n user = get_object_or_404(User, username=username)\n user_profile = UserProfile.objects.get_or_create(user=user)[0]\n weekly_schedule = WeeklySchedule.objects.filter(user_profile=user_profile)\n\n userScheduleInlineFormSet = inlineformset_factory(UserProfile, WeeklySchedule,\n fields=('day_of_week', 'time_from', 'time_to'),\n extra=1, can_delete=True)\n\n # prepare data for rendering in table\n user_schedule = weekly_schedule.values_list('day_of_week','time_from','time_to')\n rows = pivot_schedule(user_schedule)\n\n if request.method == 'POST':\n formset = userScheduleInlineFormSet(request.POST, instance=user_profile,)\n if formset.is_valid():\n formset.save()\n return redirect('my_schedule', user.username)\n else:\n formset = userScheduleInlineFormSet(instance=user_profile,)\n\n return render(\n request,\n 'schedule/myschedule.html',\n {\n 'formset': formset,\n 'days_of_week': WeeklySchedule.DAY_OF_WEEK,\n 'data': rows,\n }\n )", "def test_meeting(self):\n pass", "def __init__(self, database_manager=DataBaseManager(), emailer=EmailSender()):\n self.database_manager = database_manager\n self.emailer = emailer\n # Set available timeslots\n self.initial_time_slots = ['09:00:00',\n '10:00:00',\n '11:00:00',\n '12:00:00',\n '13:00:00',\n 
'14:00:00',\n '15:00:00',\n '16:00:00',\n '17:00:00']", "def test_list_scheduled_payments(self):\n pass", "def test_meeting_polls(self):\n pass", "def mlbschedule(self, irc, msg, args, optteam):\n \n optteam = optteam.upper().strip()\n\n if optteam not in self._validteams():\n irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n return\n \n lookupteam = self._translateTeam('yahoo', 'team', optteam) # (db, column, optteam)\n\n url = self._b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbWxiL3RlYW1z') + '/%s/calendar/rss.xml' % lookupteam\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Cannot open: %s\" % url)\n return\n \n if \"Schedule for\" not in html:\n irc.reply(\"Cannot find schedule. Broken url?\")\n return\n \n # clean this stuff up\n html = html.replace('<![CDATA[','') #remove cdata\n html = html.replace(']]>','') # end of cdata\n html = html.replace('EDT','') # tidy up times\n html = html.replace('\\xc2\\xa0',' ') # remove some stupid character.\n\n soup = BeautifulSoup(html)\n items = soup.find('channel').findAll('item')\n\n append_list = []\n\n for item in items:\n title = item.find('title').renderContents().strip() # title is good.\n day, date = title.split(',')\n desc = item.find('description') # everything in desc but its messy.\n desctext = desc.findAll(text=True) # get all text, first, but its in a list.\n descappend = (''.join(desctext).strip()) # list transform into a string.\n if not descappend.startswith('@'): # if something is @, it's before, but vs. otherwise.\n descappend = 'vs. ' + descappend\n descappend += \" [\" + date.strip() + \"]\" # can't translate since Yahoo! sucks with the team names here. \n append_list.append(descappend) # put all into a list.\n\n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} {1}\".format(ircutils.bold(optteam), descstring)\n \n irc.reply(output)", "def schedule_handler(userdata, *args):\n\t\tfor event in database.devschedule(userdata[\"cursor\"], args[0]):\n\t\t\tprint(str(event))\n\t\t\n\t\tprint(\"\")" ]
[ "0.68778497", "0.6814362", "0.67497754", "0.6255919", "0.5921792", "0.58371866", "0.58195525", "0.5796732", "0.57783014", "0.5733727", "0.5705526", "0.5705507", "0.5691883", "0.5643566", "0.5565707", "0.5564822", "0.5547369", "0.5525819", "0.552174", "0.5512991", "0.5498908", "0.5484778", "0.5469756", "0.5459951", "0.545529", "0.54383945", "0.5435166", "0.5414053", "0.54132766", "0.5375316", "0.5369238", "0.5368893", "0.53688014", "0.53649503", "0.53564006", "0.5352465", "0.5339525", "0.53394526", "0.53201056", "0.5310499", "0.53028667", "0.5293899", "0.52904534", "0.52853996", "0.527792", "0.52760607", "0.52620345", "0.52552927", "0.5255165", "0.52476716", "0.524022", "0.5238243", "0.52253675", "0.5215856", "0.52148634", "0.5208867", "0.5200211", "0.52000886", "0.5195682", "0.5195423", "0.5176217", "0.5170913", "0.5161128", "0.5160923", "0.5153469", "0.5141331", "0.51366854", "0.51222", "0.5118765", "0.511338", "0.5104118", "0.50996226", "0.50979596", "0.5094025", "0.50900114", "0.50816983", "0.5081108", "0.5080045", "0.5072481", "0.50709075", "0.50683296", "0.5068193", "0.50680226", "0.50678736", "0.50660855", "0.5062305", "0.50612223", "0.50453633", "0.5041456", "0.50402206", "0.5038785", "0.5031925", "0.5031121", "0.5028354", "0.50269747", "0.5026265", "0.50226617", "0.50158256", "0.5013726", "0.5009763" ]
0.7738
0
Chooses moves for the computer based on the state of the current board and the difficulty of the AI
def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked if difficulty >= 2: #If difficulty is at least 2 ## Cody -- you could just write: ## for slots in victory_conditions for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops check = [] #Creates empty folder called check for i in range(len(slots)): #For each spot in slots check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check ## This you can do even more efficiently using a beautiful syntax called ## "list comprehension" which entered python some years ago -- watch ## me do it in one line: ## check = [current_board[s] for s in slots] if check.count(AI_symbol)==2 and check.count(" ")==1: #If there are any rows where the AI has two symbols and there's one empty spot return(slots[check.index(" ")]) #Return the empty spot from that row ## Oops -- you repeat the code again here for no reason for n in range(len(victory_conditions)): #For each victory condition in victory_conditions slots = victory_conditions[n] #Take the victory conditions and put them in a new list check = [] #Creates empty folder called check for i in range(len(slots)): #For each spot in slots check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check if check.count(opponent_symbol)==2 and check.count(" ")==1: #If there are any rows where the opponent has two symbols and there's one empty spot return(slots[check.index(" ")]) #Return the empty spot from that row if difficulty >= 3: #If difficulty is at least 3 ## It looks like you're doing an identical loop here -- I ## wonder why you don't move the if statement inside the loop ## -- I believe that would significantly shorten your code for n in range(len(victory_conditions)): #For each victory condition in victory_conditions slots = victory_conditions[n] #Take the victory conditions and put them in a new list check = [] #Creates empty folder called check for i in range(len(slots)): #For each spot in slots check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check if check.count(AI_symbol)==1 and check.count(" ")==2: #If there are any rows where the AI has one symbol and there's two empty spots if check[0] == " ": #If the first slot from check is empty return(slots[0]) #Return the first slot else: return(slots[2]) #Return the third slot if difficulty == 4: #If difficulty is 4 if current_board[4] == " ": #If the center is empty return(4) #Take the center elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == " ": #Else, if a corner is open corners = 2*random.randint(0,4) #Selects a random corner (or center, which will reject) while current_board[corners] != " ": #Until the corner selected is empty corners = 2*random.randint(0,4) #Select a new corner or center return(corners) #Return empty corner else: sides = 2*random.randint(0,3)+1 #Selects a side while current_board[sides] != " ": #Until the side is empty sides = 2*random.randint(0,3)+1 #Selects a new side return(sides) #Returns empty side if difficulty < 4: #If difficulty is less than 4 ran = random.randint(0,8) #Picks random spot on board while current_board[ran] != " ": #Until the spot is empty ran = random.randint(0,8) #Picks a 
new spot return(ran) #Returns empty spot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def human_turn(c_choice, h_choice,xi,yi):\r\n depth = len(empty_cells(board))\r\n if depth == 0 or game_over(board):\r\n return\r\n\r\n # Dictionary of valid moves\r\n move = -1\r\n moves = {\r\n 0: [0, 0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5], 6: [0, 6], 7: [0, 7], 8: [0, 8], 9: [0, 9],\r\n 10: [1, 0], 11: [1, 1], 12: [1, 2], 13: [1, 3], 14: [1, 4], 15: [1, 5], 16: [1, 6], 17: [1, 7], 18: [1, 8], 19: [1, 9],\r\n 20: [2, 0], 21: [2, 1], 22: [2, 2], 23: [2, 3], 24: [2, 4], 25: [2, 5], 26: [2, 6], 27: [2, 7], 28: [2, 8], 29: [2, 9],\r\n 30: [3, 0], 31: [3, 1], 32: [3, 2], 33: [3, 3], 34: [3, 4], 35: [3, 5], 36: [3, 6], 37: [3, 7], 38: [3, 8], 39: [3, 9],\r\n 40: [4, 0], 41: [4, 1], 42: [4, 2], 43: [4, 3], 44: [4, 4], 45: [4, 5], 46: [4, 6], 47: [4, 7], 48: [4, 8], 49: [4, 9],\r\n 50: [5, 0], 51: [5, 1], 52: [5, 2], 53: [5, 3], 54: [5, 4], 55: [5, 5], 56: [5, 6], 57: [5, 7], 58: [5, 8], 59: [5, 9],\r\n 60: [6, 0], 61: [6, 1], 62: [6, 2], 63: [6, 3], 64: [6, 4], 65: [6, 5], 66: [6, 6], 67: [6, 7], 68: [6, 8], 69: [6, 9],\r\n 70: [7, 0], 71: [7, 1], 72: [7, 2], 73: [7, 3], 74: [7, 4], 75: [7, 5], 76: [7, 6], 77: [7, 7], 78: [7, 8], 79: [7, 9],\r\n 80: [8, 0], 81: [8, 1], 82: [8, 2], 83: [8, 3], 84: [8, 4], 85: [8, 5], 86: [8, 6], 87: [8, 7], 88: [8, 8], 89: [8, 9],\r\n 90: [9, 0], 91: [9, 1], 92: [9, 2], 93: [9, 3], 94: [9, 4], 95: [9, 5], 96: [9, 6], 97: [9, 7], 98: [9, 8], 99: [9, 9],\r\n \r\n }\r\n\r\n clean()\r\n print(f'Human turn [{h_choice}]')\r\n render(board, c_choice, h_choice)\r\n while move < 0 or move > 99:\r\n try:\r\n move = int(input('Final position HUMAN (0..99): '))\r\n coord = moves[move]\r\n can_move = set_move(coord[0], coord[1], 2, xi, yi)\r\n if not can_move:\r\n print('Bad move')\r\n move = -1\r\n except (EOFError, KeyboardInterrupt):\r\n print('Bye')\r\n exit()\r\n except (KeyError, ValueError):\r\n print('Bad choice')", "def play(self):\r\n state = copy.deepcopy(self.initial_state)\r\n # calculating the best move value and action for the given state\r\n best_action = self.minimax_decision(state)\r\n\r\n # To handle the case where there are no possible moves from the initial state\r\n if best_action[1] not in [(9, 9, 9), (6, 6), (5, 5)]:\r\n # Making the best move corresponding to the initial state\r\n state = copy.deepcopy(self.initial_state)\r\n expected_state = self.result(state, best_action[1])\r\n\r\n # Printing the board state resulting from the best move.\r\n print '{}'.format(self.convert_matrix_rastor(expected_state))", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n 
get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n 
opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n 
else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def _get_computer_move():\n return choice(choices)", "def decide(self, game, state, available_moves, opponent_moves):\n\t\tstatecopy = copy.deepcopy(state)\n\t\troot = GameNode(game, None, statecopy, available_moves, None)\n\t\ttree = GameTree(root)\n\t\tminimaxAB = AlphaBeta(tree)\n\t\tbest_state = minimaxAB.alpha_beta_search(tree.root)\n\t\tmove = best_state.action\n\t\treturn [move.row, move.column, move.shift]", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X 
| X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return 
possible_moves[possible_decisions.index(max(possible_decisions))]", "def take_turn(state, hint):\n \"\"\" display the current state and the labels for choosing a move\"\"\"\n\n print(state) # print the game board\n print(color_magenta(hint))\n print(\"\") #add a space # print the numbers that correspond to all moves in the game board\n print(color_green(\"Your current score is: \"), color_green(str(state.score1)))\n print(color_green(\"AI's current score is: \"), color_green(str(state.check_finished_boxes() - state.score1))) # record the scores of the player and AI at the moment\n print(\"\") #add a space\n\n move = input(color_yellow(\"Please enter a number to connect: \"))\n\n \"\"\"prompt again for a move until it's a valid input and corresponds to an empty space in the board\"\"\"\n while not move.isnumeric() or not (0 <= int(move) <= 48) or (int(move) % 2 == 0) or state.board[int(move) // 7][int(move) % 7] != \"\":\n move = input(color_yellow(\"Please enter a valid connection: \"))\n number = move\n if number in hint:\n index = hint.find(number)\n if len(number) == 1:\n hint = hint[0:index] + \" \" + hint[index + 1:] # Make the moves player already made disappear\n else:\n hint = hint[0:index] + \" \" + hint[index + 2:]\n\n state.make_move(move)\n return hint", "def minimax(board):\n #This function will return the best move. \n #If Ai is playing as X, I can reduce the processing time by creating a random first move. \n if (board == initial_state()):\n coord1 = randint(0,2)\n coord2 = randint(0,2)\n return ((coord1,coord2))\n #first I determine which player's turn it is\n player_to_move = player(board)\n best_action = None\n #If I am X\n if(player_to_move == \"X\"):\n current_max = float('-inf')\n #for every possible action I have right now, I'll call my \"future\" Min_Value since I will asume what will happen if I take this move.\n for action in actions(board):\n #peak on the future if I take that move\n curr_score = Min_Value(result(board,action))\n #if my future is favorable, I will store it as my current best option.\n if curr_score>= current_max:\n current_max = curr_score\n best_action = action\n else:\n #If I am O, I do something similar. \n current_max = float('inf')\n #for every action I peak on the future for favorable results\n for action in actions(board):\n #this time, however, it would be X's turn so I need to start with Max_Value\n curr_score = Max_Value(result(board,action))\n #if my future is favorable, I store it\n if curr_score<= current_max:\n current_max = curr_score\n best_action = action\n #I return the best move.\n return best_action", "def move(self, board):\r\n self.start_time = time.time()\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n if disk_total < 15:\r\n # In early-game, we can allow a deeper minimax search since there's not too many possible moves.\r\n self.minimax_max_depth = 7\r\n\r\n elif disk_total < 45:\r\n # In mid-game, minimax tree has the most branches. 
Therefore, we must give it space to breathe.\r\n self.minimax_max_depth = 5\r\n else:\r\n # In the very end-game, minimax tree has the least branches, so we can allow a full search.\r\n self.minimax_max_depth = 8\r\n\r\n possible_moves = self.find_possible_moves(board, self.my_color)\r\n\r\n # If there's only one move available, return it\r\n if len(possible_moves) == 1:\r\n return possible_moves[0]\r\n\r\n # If we can take a corner, take it and don't consider any other options.\r\n # This rarely backfires and allows to save a tiny bit of time\r\n corners = [(0,0), (0,7), (7,0), (7,7)]\r\n for corner in corners:\r\n if corner in possible_moves:\r\n return corner\r\n\r\n # Grow a minimax tree to find the best available move\r\n alpha_init = -10000000\r\n beta_init = 10000000\r\n\r\n available_moves = self.minimax(board, 0, self.my_color, alpha_init, beta_init)\r\n print(available_moves)\r\n if available_moves != 0:\r\n best_value = max(available_moves.values())\r\n for move in available_moves:\r\n if available_moves[move] == best_value:\r\n return move\r\n\r\n return None", "def ChooseAction(board, player):\n\t# list of all possible initial actions\n possible_actions = list(i for i in range(16) if board[i] == 0)\n\t\n # default action\n chosen_move = min(possible_actions)\n\n # Dict to keep track of searched utilities and transposed/symmetric boards utilities\n trans_map = {}\n\t\n # function to use minimax to chose next move, we pass the current board, the possible actions and the player\n chosen_move = min_max_dec(possible_actions, copy.copy(board), player)\n \n return chosen_move", "def human_opponent(state):\n print(state)\n while True:\n inp = input(\"What is your move? \\n\")\n if inp == 'pass':\n return len(state.valid_actions) - 1\n if inp == 'random':\n return random.randint(0, len(state.valid_actions) - 1)\n\n try:\n pos = [int(x) for x in inp.split()]\n action = pos[0]*state.board_size + pos[1]\n choice = state.valid_actions.index(action)\n return choice\n except:\n print(\"Invalid move {} try again.\".format(inp))", "def play_game():\n\tstate = Coinche(verbose=True)\n\tbeliefs = [Belief(i, state) for i in range(4)]\n\n\twhile state.get_moves():\n\t\tprint(state)\n\t\tm = ismcts(rootstate=state, itermax=2000, verbose=False, belief=beliefs[state.player_to_move])\n\t\tprint(\"Best Move: \" + str(m) + \"\\n\")\n\t\tstate.do_move(m)\n\n\tfor p in range(state.number_of_players):\n\t\tprint(\"Player \" + str(p), state.get_result(p))", "def move(self, board):\n if self.name == \"Combination_Easy\":\n return self.alpha_beta_search(board, 1)\n elif self.name == \"Combination_Normal\":\n return self.alpha_beta_search(board, 2)\n elif self.name == \"Combination_Hard\":\n return self.alpha_beta_search(board, 3)\n elif self.name == \"static\":\n return self.static_player(board)\n elif self.name == \"parity\":\n return self.parity_player(board)\n elif self.name == \"mobility\":\n return self.mobility_player(board)\n elif self.name == \"pmobility\":\n return self.potential_mobility_player(board)\n elif self.name == \"corners\":\n return self.corners_player(board)\n elif self.name == \"stability\":\n return self.stability_player(board)", "def computer_move(board,move,player):\r\n com_execution(board, move, player)", "def find_best_move(state: GameState) -> None:", "def choose_move(self, board):\n if self.opp == Player.HUMAN:\n time.sleep(4)\n if self.type == Player.HUMAN:\n move = input(\"Please enter your move:\")\n while not board.legalMove(self, move):\n print(move, \"is not valid\")\n move = 
input(\"Please enter your move\")\n return move\n elif self.type == Player.RANDOM:\n move = choice(board.legalMoves(self))\n return move\n elif self.type == Player.MINIMAX:\n val, move = self.minimax_move(board, self.depth * 2,\n Player.MAX_PLAYER)\n board.last_move = move\n return move\n elif self.type == Player.ABPRUNE:\n val, move = self.alpha_beta_move(board, self.depth * 2,\n float('-inf'), float('inf'),\n Player.MAX_PLAYER)\n return move\n elif self.type == Player.CUSTOM:\n move = self.agent.getAction(board)\n self.agent.update_current_state(board, move)\n return move\n elif self.type == Player.MIX:\n return self.mixed_move(board)\n\n else:\n print(\"Unknown player type\")\n return -1", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def move(self, i, j):\n board_cpy = copy.deepcopy(self.board)\n piece = self.board[self.selected[0]][self.selected[1]]\n if piece == 0:\n return False\n elif piece.player != self.active_player:\n print(\"it's not your turn\", piece.player, self.active_player)\n return False\n elif self.board[i][j] != 0:\n if piece.player == self.board[i][j].player:\n print(\"can't capture your own pieces\")\n return False\n if i==self.selected[0] and piece.direction == 0:\n if piece.master == False:\n if (i + j - sum(self.selected))%2 == 0:\n print(\"must choose a different color\")\n return False\n if j < self.selected[1]:\n spaces_between = [self.board[i][y] for y in range(j+1, self.selected[1])]\n else:\n spaces_between = [self.board[i][y] for y in range(self.selected[1]+1, j)]\n print(spaces_between)\n for p in spaces_between:\n if p != 0:\n print(\"can't jump pieces\")\n return False\n piece.direction = 1\n self.board[i][j] = piece\n self.board[self.selected[0]][self.selected[1]] = 0\n elif j==self.selected[1] and piece.direction == 1:\n if piece.master == False:\n if (i + j - sum(self.selected))%2 == 0:\n print(\"must choose a different color\")\n 
return False\n if i < self.selected[0]:\n spaces_between = [self.board[x][j] for x in range(i+1, self.selected[0])]\n else:\n spaces_between = [self.board[x][j] for x in range(self.selected[0]+1, i)] \n print(spaces_between)\n for p in spaces_between:\n if p != 0:\n print(\"can't jump pieces\")\n return False\n piece.direction = 0\n self.board[i][j] = piece\n self.board[self.selected[0]][self.selected[1]] = 0\n else:\n print(\"Invalid movement\")\n return False\n if self.board[i][j] == 0:\n print(\"Something wrong happened\")\n return False\n elif (i==0 or i==self.board_size-1) and (j==0 or j==self.board_size-1):\n self.board[i][j].master = True\n self.active_player = (self.active_player+1)%2\n self.sequence += [board_cpy]\n return True", "def move(self, board):\n return self.prng.choice(board.available())", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #start off with some shallow depth:\r\n if self.turn_no < 5:\r\n depth = 3\r\n else:\r\n depth = 2\r\n \r\n #set a constraint for search depth\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if its taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), 
result[2], result[3])\r\n # return final result \r\n return final_result", "def user_turn(self):\r\n\r\n self.display_state() # display the current state\r\n print(\r\n '\\nTURN: You -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\r\n # Get the row and col number of the card you want to select\r\n x1, y1 = self.input_validation('Enter the location of the first card you pick (row, col) -> ')\r\n self.selected = [x1, y1] # a temporary holder for the first choice\r\n\r\n # Get the corresponding card ID which is also the key for the dictionary with all the cards\r\n choice1_key = self.state[x1, y1]\r\n print('The card you selected: {0}'.format(self.deck[choice1_key]))\r\n\r\n # Repeat this for your second choice\r\n x2, y2 = self.input_validation('Enter the location of the second card you pick (row, col) -> ')\r\n self.selected = [-1, -1] # reset the temporary hold\r\n\r\n choice2_key = self.state[x2, y2]\r\n print('The card you selected: {0}'.format(self.deck[choice2_key]))\r\n\r\n # Check if the two cards are a match or not\r\n if self.check_card(self.deck[choice1_key], self.deck[choice2_key]):\r\n print('MATCH')\r\n # Replace the corresponding cards in the remaining inventory and state with -1\r\n self.remaining[choice1_key] = -1\r\n self.remaining[choice2_key] = -1\r\n self.state[x1, y1] = -1\r\n self.state[x2, y2] = -1\r\n self.player_cards += 2 # the player gets 2 cards\r\n self.bin.append([x1, y1]) # move the location of the card to the already-taken bin\r\n self.bin.append([x2, y2])\r\n self.forget_memory(choice1_key) # remove from computer's memory\r\n self.forget_memory(choice2_key)\r\n self.match = 1 # player will continue to choose cards\r\n else:\r\n print('NOT a match')\r\n # Add these cards to the computer's memory\r\n self.computer_memory[choice1_key] = [x1, y1]\r\n self.computer_memory[choice2_key] = [x2, y2]\r\n self.match = 0 # computer's turn\r", "def make_move(self, time_limit, players_score):\n turn_start_time = time.time()\n\n current_state = State(self.board, 1, self.board_min_len - self.played_turns, self.player_pos,\n self.rival_pos, self.player_score, self.rival_score, None)\n\n if current_state.fruit_remaining_turns > 0: # fruit stage\n # calculate average time for turn assuming all squares are reachable\n expected_remaining_turns = len(np.argwhere(np.logical_or(\n self.board == 0, self.board > 2))) / 2\n avg_turn_time = self.game_remaining_time / expected_remaining_turns\n turn_time_limit = avg_turn_time\n if self.played_turns < (self.board_min_len / 4):\n turn_time_limit *= 4\n\n else: # no fruit stage\n # calculate average time for turn, use the following heuristic because it\n # it does exactly what we need\n player_cc, rival_cc, is_same_cc = connected_components_heuristic(current_state, 1)\n expected_remaining_turns = np.min([player_cc, rival_cc])\n avg_turn_time = avg_turn_time = self.game_remaining_time / expected_remaining_turns\n turn_time_limit = avg_turn_time\n\n if not is_same_cc:\n self.heuristic_params[\"isDifferentCC\"] = True\n\n # give more time to do the best in the beginning of the stage if its not to dangarous:\n if current_state.fruit_remaining_turns > -2 and expected_remaining_turns >= 4:\n turn_time_limit *= 3\n \n print(\"avg remaining turn time\", avg_turn_time, \"(compete)\")\n depth = 1\n remaining_turn_time = turn_time_limit\n last_iteration_time = 0\n chosen_direction = (0, 1)\n while True:\n t = time.time()\n chosen_h, chosen_direction = self.algorithm.search(state=current_state,\n 
depth=depth,\n maximizing_player=1,\n penalty=self.penalty_score,\n heuristic_params=self.heuristic_params)\n last_iteration_time = time.time() - t\n remaining_turn_time -= last_iteration_time\n depth += 1\n if not(remaining_turn_time > last_iteration_time * 3 and depth < 50):\n break\n\n print(\"searched depth :\", depth - 1, \"(compete)\")\n new_player_pos = (self.player_pos[0] + chosen_direction[0], self.player_pos[1] + chosen_direction[1])\n self.board[self.player_pos] = -1\n self.player_score += self.board[new_player_pos]\n self.player_pos = new_player_pos\n self.board[new_player_pos] = 1\n\n self.game_remaining_time -= time.time() - turn_start_time\n return chosen_direction[0], chosen_direction[1]", "def _find_move(self, current, ply, difficulty_level, player):\n\n #check the score of this state\n node_score = current.get_score()\n #if this state is a win for either side, or if we're at the end of our difficulty depth level\n if ply == difficulty_level or node_score > 100000000 or node_score < -100000000: #base case\n return node_score * (difficulty_level+1-ply) #NOTE: I'm not sure if this is paranoid, but I want to make sure it doesn't multiply by 0 if we hit a win condition at the end of our search\n\n #recursive\n else:\n options = []\n #we're either player one (Min) or player two (Max)\n\n #for a column at c in the rack\n for c in range(WIDTH):\n\n #simulate a move in that column, making an attempt State\n attempt = current.simul_move(c, player)\n\n if attempt is not None: #if this produced a move\n if player == 1: next_player = 2\n else: next_player = 1\n\n #recurse down this attempted move\n attempt_score = self._find_move(attempt, ply+1, difficulty_level, next_player)\n #add the results of each column move into options\n options.append(attempt_score)\n if len(options) == 0: return 0\n #based on whether we're the current player or not, max (if we are) or min (if we aren't) and pass back the result\n if player == self.player_id: return max(options)\n else: return min(options)", "def winningMove():\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\tnoWin=True\r\n\tmove=False\r\n\tif turn==\"Player1\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove=1\t\r\n\t\t\ttile1+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-1\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-1\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\t\r\n\t\t\ttile5+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn 
move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=1\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\t\t\r\n\t\t\ttile9+=-1\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\r\n\telif turn==\"Player2\":\r\n\t\tif validMove(1):\r\n\t\t\ttile1+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 1\t\t\t\t\r\n\t\t\ttile1+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(2):\r\n\t\t\ttile2+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 2\r\n\t\t\ttile2+=-2\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\t\tif validMove(3):\r\n\t\t\ttile3+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 3\r\n\t\t\ttile3+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(4):\r\n\t\t\ttile4+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 4\t\r\n\t\t\ttile4+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(5):\r\n\t\t\ttile5+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 5\t\r\n\t\t\ttile5+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(6):\r\n\t\t\ttile6+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 6\t\r\n\t\t\ttile6+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(7):\r\n\t\t\ttile7+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 7\t\r\n\t\t\ttile7+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(8):\r\n\t\t\ttile8+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 8\t\r\n\t\t\ttile8+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\t\r\n\t\tif validMove(9):\r\n\t\t\ttile9+=2\r\n\t\t\tif win():\r\n\t\t\t\tnoWin=False\r\n\t\t\t\tmove= 9\r\n\t\t\ttile9+=-2\t\t\r\n\t\t\tif move:\r\n\t\t\t\treturn move\t\r\n\tif noWin:\r\n\t\treturn False", "def choose_action(self, board):\n options = board.empty_cells\n\n # In this game we look for winning possibilities\n for choice in options:\n # For each option play the option,\n # and observe the outcome\n new_board = copy.deepcopy(board)\n new_board.mark_cell(choice[0], choice[1], self._sign)\n # If a winning cell is found, occupy it\n if new_board.has_winner():\n return choice\n\n # In this loop we prevent loosing the game\n for choice in options:\n # For each option play the option,\n # and observe the outcome\n new_board = copy.deepcopy(board)\n new_board.mark_cell(choice[0], choice[1], self._get_opponent_sign())\n # If an opponent has a winning cell occupy it\n if new_board.has_winner():\n return choice\n\n # Otherwise pick randomly\n return random.choice(options)", "def player(board):\n #X ALWAYS gets first move, alternates with each additional move\n curr_moves = actions(board)\n if (board == initial_state()):\n return X\n if(len(curr_moves) % 2 == 0):\n return O\n else:\n return X", "def play(state, player_turn, human_marker, depth):\n alpha = -10\n beta = 10\n while True:\n draw_board(state)\n marker = is_terminal(state)\n\n if marker is not None:\n if marker == 'X':\n print(\"The winner is 'X'!\")\n elif marker == 'O':\n print(\"The winner is 'O'!\")\n else:\n print(\"The game ended in a tie!\")\n return\n\n # Presumably AI's turn.\n if player_turn == 0:\n ai_marker = 'X' if human_marker == 'O' else 'O'\n if ai_marker == 'X':\n value, move = max_value(state, ai_marker, depth, alpha, beta)[:2]\n else:\n value, move = min_value(state, ai_marker, depth, alpha, beta)[:2]\n depth = depth + 1\n state[move[0]][move[1]] = ai_marker\n player_turn = 1\n\n # Presumably human player's turn.\n else:\n move = list(map(int, 
input('Enter your move: ').strip('[]').split(',')))\n while not is_valid_move(state, move):\n move = list(map(int, input('Enter your move: ').strip('[]').split(',')))\n\n state[move[0]-1][move[1]-1] = human_marker\n depth = depth + 1\n player_turn = 0", "def step(self, board, possible_moves):\n\n self.sess.run(self.init)\n\n # Store the \"base\" state before board gets modified\n state = board.board_arr\n actions = board.available_white_moves()\n\n a_opt, allQ = self.find_optimal_move(board)\n\n # generates random action with probability epsilon\n if np.random.rand(1) < self.epsilon:\n a_opt = np.random.randint(0, len(actions), size=1)[0]\n\n # get new state and reward by executing preferred action\n board, reward = self.simulate(board, actions[a_opt])\n\n if self.first_step:\n self.first_step = False\n else:\n self.train(state, reward, allQ[a_opt])\n\n # save some subset of networks\n if self.train_step % self.SAVE_STEP_NUM == 0:\n self.saver.save(self.sess, self.SAVE_FILE)\n self.train_step += 1\n\n return a_opt", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def UCTPlayGame(itermax):\r\n print(\"Welcome to Ultimate Tic-Tac-Toe!\")\r\n player = 2 if input(\"Do you want to go first? [Y/N]: \") == \"N\" else 1\r\n\r\n state = GameState()\r\n while state.GetMoves():\r\n currentPlayer = state.NextPlayer()\r\n\r\n print(str(state))\r\n print(\"Moves for player \" + str(currentPlayer) + \": \")\r\n print(np.matrix(state.GetMoves()), \"\\n\")\r\n\r\n if currentPlayer == player:\r\n m = None\r\n while m not in state.GetMoves():\r\n try:\r\n m = int(input(\"Your move: \"))\r\n except ValueError:\r\n continue\r\n # m = random.choice(state.GetMoves())\r\n else:\r\n m = UCT(rootstate=state, itermax=itermax, verbose=False)\r\n print(\"AI played: \" + str(m))\r\n state.DoMove(m)\r\n print(str(state))\r\n\r\n if state.GetResult(state.playerJustMoved) == 1.0:\r\n print(\"Player \" + str(state.playerJustMoved) + \" wins!\")\r\n return state.playerJustMoved\r\n elif state.GetResult(state.playerJustMoved) == 0.0:\r\n print(\"Player \" + str(state.NextPlayer()) + \" wins!\")\r\n return state.NextPlayer()\r\n else:\r\n print(\"Nobody wins!\")\r\n return 0", "def minimax(board):\r\n player_moving = player(board)\r\n\r\n\r\n if board == [[EMPTY] * 3] * 3:\r\n return (0, 0)\r\n\r\n if player_moving == X:\r\n value = -math.inf\r\n selected_action = None\r\n for action in actions(board):\r\n minValueResult = minValue(result(board, action))\r\n if minValueResult > value:\r\n value = minValueResult\r\n selected_action = action\r\n elif player_moving == O:\r\n value = math.inf\r\n selected_action = None\r\n for action in actions(board):\r\n maxValueResult = maxValue(result(board, action))\r\n if maxValueResult < value:\r\n value = maxValueResult\r\n selected_action = action\r\n\r\n return selected_action", "def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all 
illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def choose_move(self, possible_moves, seconds_left):\n # TODO: update this method\n print('\\--------------Choose Move--------------/')\n print(possible_moves)\n print(list(self.current_board.legal_moves))\n search_tree = MCTS(5, self.color, self.current_board)\n search_tree.search()\n move = search_tree.pick_move()['move']\n\n return move", "def get_ai_move_minimax(self, gamestate, depth, current_player):\n multiplier = 1 if current_player else -1\n # Base case\n if depth == 0:\n return (\n self.evaluate_board(\n gamestate.board,\n gamestate.white_checkmate,\n gamestate.black_checkmate,\n gamestate.stalemate,\n )\n * multiplier\n )\n\n best_score = self.BLACK_CHECKMATE\n for move in gamestate.get_valid_moves():\n # Execute the move\n (current_row, current_column), (new_row, new_column) = move\n current_piece = gamestate.board.board[current_row][current_column]\n piece_at_new_square = gamestate.board.board[new_row][new_column]\n\n gamestate.board.board[current_row][current_column] = None\n gamestate.board.board[new_row][new_column] = current_piece\n\n # Update King's location\n if isinstance(current_piece, King):\n if current_player:\n gamestate.white_king_location = (new_row, new_column)\n else:\n gamestate.black_king_location = (new_row, new_column)\n\n # Execute pawn promotion\n elif isinstance(current_piece, Pawn):\n if current_piece.colour and new_row == 0:\n new_piece = 
Queen(new_row, new_column, True)\n gamestate.board.white_pieces.remove(current_piece)\n gamestate.board.white_pieces.append(new_piece)\n gamestate.board.board[new_row][new_column] = new_piece\n\n elif not current_piece.colour and new_row == 7:\n new_piece = Queen(new_row, new_column, False)\n gamestate.board.black_pieces.remove(current_piece)\n gamestate.board.black_pieces.append(new_piece)\n gamestate.board.board[new_row][new_column] = new_piece\n\n # Switch player\n gamestate.current_player_colour = not gamestate.current_player_colour\n\n if current_piece is None:\n return self.BLACK_CHECKMATE\n\n current_piece.row = new_row\n current_piece.column = new_column\n\n if piece_at_new_square:\n if piece_at_new_square.colour:\n gamestate.board.white_pieces.remove(piece_at_new_square)\n else:\n gamestate.board.black_pieces.remove(piece_at_new_square)\n\n gamestate.is_checkmate_or_stalemate()\n gamestate.check_draw()\n\n # Evaluate score for gamestate recursively\n score = -1 * self.get_ai_move_minimax(\n gamestate, depth - 1, not current_player\n )\n\n # Undo the move\n gamestate.board.board[current_row][current_column] = current_piece\n gamestate.board.board[new_row][new_column] = piece_at_new_square\n\n if isinstance(current_piece, King):\n if current_player:\n gamestate.white_king_location = (current_row, current_column)\n else:\n gamestate.black_king_location = (current_row, current_column)\n\n # Undo pawn promotion\n elif isinstance(current_piece, Pawn):\n if current_piece.colour and new_row == 0:\n gamestate.board.white_pieces.append(current_piece)\n gamestate.board.white_pieces.remove(new_piece)\n elif not current_piece.colour and new_row == 7:\n gamestate.board.black_pieces.append(current_piece)\n gamestate.board.black_pieces.remove(new_piece)\n\n # Switch player back\n gamestate.current_player_colour = not gamestate.current_player_colour\n\n current_piece.row = current_row\n current_piece.column = current_column\n\n if piece_at_new_square:\n if piece_at_new_square.colour:\n gamestate.board.white_pieces.append(piece_at_new_square)\n else:\n gamestate.board.black_pieces.append(piece_at_new_square)\n\n gamestate.white_checkmate = False\n gamestate.black_checkmate = False\n gamestate.stalemate = False\n\n # Check if best score and update list of best moves\n if score > best_score:\n best_score = score\n if depth == self.DEPTH:\n self.minimax_best_moves = [move]\n\n elif score == best_score and depth == self.DEPTH:\n self.minimax_best_moves.append(move)\n\n return best_score", "def make_move(self, time_limit, players_score):\n \n start_time = time.time()\n d = 1 \n \n reach_the_end = False\n best_direction = None\n chosen_state = None\n \n time_limit = (2 * self.game_time * float(self.player_turns - self.turns + 1)) / ((self.player_turns + 1) * self.player_turns)\n time_limit += self.spaire_time\n\n if time_limit >= 5:\n TIME_ESTIMATION = 0.9 \n else:\n TIME_ESTIMATION = 0.85\n\n while not reach_the_end: \n \n iter_time_limit = TIME_ESTIMATION * ( time_limit - (time.time() - start_time) )\n \n state = State(get_directions(),self.board,self.locations,self.fruits_on_board_dict,PLAYER,players_score,self.penalty_score,self.fruits_ttl,self.turns)\n\n try:\n _, best_direction, reach_the_end,chosen_state = self.alphabeta.search(state,d,True,iter_time_limit,alpha=float('-inf'), beta=float('inf'))\n d += 1\n except Exception as e:\n self.spaire_time = time_limit - (time.time() - start_time)\n break\n \n # Set new location \n if best_direction == None:\n best_direction = self.get_random_move() \n 
self.set_player_location(best_direction)\n \n self.turns += 1\n return best_direction", "def determine_best_move(self, board):\r\n\r\n # Check if the AI is smart, run a smart version of the AI\r\n if self.is_smart:\r\n\r\n # Iterate through every column\r\n # keep track of any restricted moves (moves that may cause the challenger to win)\r\n # and keep track of the final result of where the AI should move\r\n restricted = []\r\n ai_move = -1\r\n for column in range(board.width):\r\n\r\n # Create two copies of the board to emulate AI moves and player moves\r\n ai_copy = board.copy()\r\n player_copy = board.copy()\r\n\r\n # Check if the current column is full, move onto the next column\r\n if board.is_column_full(column):\r\n continue\r\n\r\n # Column is not full; Emulate AI and player moves at this column\r\n ai_copy.add_piece(column, is_challenger = False) # AI move\r\n player_copy.add_piece(column, is_challenger = True) # Player move\r\n\r\n # Check if either the ai_copy or player_copy has a win in it\r\n ai_win_check = ai_copy.check_for_winner()\r\n player_win_check = player_copy.check_for_winner()\r\n\r\n # If either board has a win in it, make that the AI move\r\n # if the player would go to this current column in their next move\r\n # they would win, the AI should try to stop it\r\n # if the ai would go to this current column in its next move\r\n # they would win, the AI should immediately go here\r\n if ai_win_check == False or player_win_check == True:\r\n ai_move = column\r\n break\r\n\r\n # Neither of the moves would win in either board,\r\n # emulate the next moves on the same column\r\n else:\r\n\r\n # Check if the column is full, move onto the next column\r\n if ai_copy.is_column_full(column):\r\n continue\r\n\r\n # Column is not full, emulate the player move on the AI copy\r\n ai_copy.add_piece(column, is_challenger = True)\r\n\r\n # Check if the player would win; If so, do not let the AI go to this column\r\n player_win_check = ai_copy.check_for_winner()\r\n if player_win_check == True:\r\n restricted.append(column)\r\n\r\n # There has been no ai_move generated yet\r\n # generate a random column\r\n if ai_move == -1:\r\n\r\n # Check if the board is full, there must be a draw\r\n if board.is_board_full():\r\n return False\r\n\r\n # The board is not full, generate a random column that is not full\r\n while True:\r\n ai_move = randint(0, board.width - 1)\r\n\r\n # Check if the column is full, continue generating a random column\r\n if board.is_column_full(ai_move):\r\n continue\r\n\r\n # Check to see if this is the only available column to go to\r\n # or check to see if this column is a restricted move\r\n elif board.board[0].count(None) == 1 or len(restricted) == board.board[0].count(None):\r\n break\r\n\r\n # Check to see if the move is not a restricted move\r\n elif ai_move not in restricted:\r\n break\r\n\r\n # The AI is not smart, choose a random place\r\n else:\r\n ai_move = randint(0, board.width - 1)\r\n while board.is_column_full(ai_move):\r\n ai_move = randint(0, board.width - 1)\r\n\r\n # Make the AI go to its chosen move\r\n board.add_piece(ai_move, is_challenger = False)\r\n return True", "def game(gametype=1):\r\n\r\n\tglobal turn, tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9, move1, move2\r\n\r\n\ttile1= 0 \r\n\ttile2= 0\r\n\ttile3= 0\r\n\ttile4= 0\r\n\ttile5= 0\r\n\ttile6= 0\r\n\ttile7= 0\r\n\ttile8= 0\r\n\ttile9= 0\r\n\r\n\tmove1= 0 \r\n\tmove2= 0 \r\n\tturn=\"Player1\"\r\n\r\n\twinner=0\r\n\tif gametype==1:\r\n\t\twhile (not(win()) and 
validBoard()):\r\n\t\t\tturn=\"Player1\"\r\n\t\t\tmove=takeNaiveMove()\r\n\t\t\tupdateMove(move)\r\n\t\t\tmove1+=1\r\n\t\t\tif win():\r\n\t\t\t\twinner=1\r\n\t\t\t\tbreak\r\n\t\t\tif move1+move2==9:\r\n\t\t\t\t\tbreak\t\r\n\t\t\tturn=\"Player2\"\r\n\t\t\tmove=takeNaiveMove()\r\n\t\t\tupdateMove(move)\r\n\t\t\tmove2+=1\r\n\t\t\tif win():\r\n\t\t\t\twinner=2\r\n\t\tif validBoard():\t\t\t\r\n\t\t\treturn winner\r\n\t\telse:\r\n\t\t\treturn \"Error in board\"\t\r\n\t\t \r\n\telif gametype==2:\r\n\t\twhile(not(win()) and validBoard()):\r\n\t\t\tturn=\"Player1\"\r\n\t\t\tmove=takeNaiveMove()\r\n\t\t\tupdateMove(move)\r\n\t\t\tmove1+=1\r\n\t\t\tif win():\r\n\t\t\t\twinner=1\r\n\t\t\t\tbreak\r\n\t\t\tif move1+move2==9:\r\n\t\t\t\tbreak\t\r\n\t\t\tturn=\"Player2\"\r\n\t\t\tmove=takeStrategicMove()\r\n\t\t\tupdateMove(move)\r\n\t\t\tmove2+=1\r\n\t\t\tif win():\r\n\t\t\t\twinner=2\r\n\t\tif validBoard():\r\n\t\t\treturn winner\t\t\t\r\n\t\telse:\r\n\t\t\treturn \"Error in board\"\t\t\t\r\n\telse:\r\n\t\twhile(not(win()) and validBoard()):\r\n\t\t\tturn=\"Player1\"\r\n\t\t\tmove=takeStrategicMove()\r\n\t\t\tupdateMove(move)\r\n\t\t\tmove1+=1\r\n\t\t\tif win():\r\n\t\t\t\twinner=1\r\n\t\t\t\tbreak\r\n\t\t\tif move1+move2==9:\r\n\t\t\t\tbreak\t\t\r\n\t\t\tturn=\"Player2\"\r\n\t\t\tmove=takeStrategicMove()\r\n\t\t\tupdateMove(move)\r\n\t\t\tmove2+=1\r\n\t\t\tif win():\r\n\t\t\t\twinner=2\r\n\t\tif validBoard():\r\n\t\t\treturn winner\t\t\t\r\n\t\telse:\r\n\t\t\treturn \"Error in board\"", "def choose_move(self):\n return 0", "def TicTacToe(): #Written by Cody West\n current_board = [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"] #Empty board\n players = 0 #Number of players\n human_turn = 0 #Indicates whether the human goes first or second (is 0 for two player games)\n turn = 1 #Turn number\n while players != 1 and players != 2: #While a valid number of players has not been chosen\n players = int(raw_input(\"How many players are there?\")) #Asks how many players there are\n if players < 1 or players > 2: #If the choice is not valid\n print(\"Please pick 1 or 2 players\") #Prints error message\n if players == 1: #If 1 player\n difficulty = 0 #Difficulty variable\n while difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #While a valid difficulty has not been chose\n difficulty = int(raw_input(\"Pick a difficulty. 
1 is easiest, 4 is hardest\")) #Ask for a difficulty\n if difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #If difficulty choice is not valid\n print(\"Please pick a difficulty between 1 and 4\") #Prints error message\n while human_turn != 1 and human_turn != 2: #While a human turn has not been chosen\n human_turn = int(raw_input(\"Would you like to go first (1) or second (2)?\")) #Ask for human turn\n if human_turn != 1 and human_turn != 2: #If a valid turn is not chosen\n print(\"Please pick turn 1 or 2\") #Print error message\n if human_turn == 1: #If human goes first\n player1 = \"human\" #Player 1 is human\n player2 = \"AI\" #Player 2 is AI\n elif human_turn == 2: #If human goes second\n player1 = \"AI\" #Player 1 is AI\n player2 = \"human\" #Player 2 is human\n else: #If neither\n player1 = \"human\" #Player 1 is human\n player2 = \"human\" #Player 2 is human\n while turn < 10: #While the number of turns in Tic Tac Toe has not been exceeded\n if turn < 3: #For the first three turns\n draw_example_board() #Draw a board showing the slot numbers\n draw_board(current_board) #Draw current board\n ## You could write this logic much more compactly -- try to avoid having so many\n ## lines of code that look identical. You have four different update_board calls\n ## here where you could have just one.\n if turn%2 == 1: #If it's an odd numbered turn\n if player1 == \"human\":\n print(\"human\")\n update_board(current_board, get_input(current_board, turn), \"X\") #Update board with player 1's selection and X\n else:\n print(\"AI\")\n update_board(current_board, AI(current_board,\"X\",\"O\", difficulty), \"X\") #Update board with AI selection\n else:\n if player2 == \"human\":\n print(\"human\")\n update_board(current_board, get_input(current_board, turn), \"O\") #Update board with player 2's selection and X\n else:\n print(\"AI\")\n update_board(current_board, AI(current_board,\"O\",\"X\", difficulty), \"O\") #Update board with AI selection\n if check_victory(current_board) == \"done\":\n return \"whatever\"#Check victory\n turn = turn + 1 #Increase turn number", "def play_against_minimax():\n global FIRST_MOVE\n global done\n done = False\n g = Game()\n turn = np.random.randint(2)\n # if turn == RED:\n # FIRST_MOVE = False\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n # print(g.board)\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n obs = np.zeros((6, 7))\n for row, sublist in enumerate(g.board):\n for col, i in enumerate(sublist):\n observation.append(i)\n obs[col, row] = i\n\n observation = np.asarray(observation)\n action, _ = minimax(np.flipud(obs), 5, -math.inf, math.inf, True)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = (turn + 1) % 2\n return", "def minimax(board):\n\n current_player = player(board)", "def result(board, action):\n # Ensure manipulations of hypothetical board don't alter current board values\n 
possible_board = copy.deepcopy(board)\n current_player = player(possible_board)\n\n # Generate boards for all possible moves by current player\n if action in actions(possible_board):\n possible_board[action[0]][action[1]] = current_player\n return possible_board\n\n raise Exception(\"Invalid move.\")", "def choose_move(game_state):\n # https://en.wikipedia.org/wiki/Nim#Winning_positions\n nim_sum = 0\n for val in game_state:\n nim_sum ^= val\n if nim_sum == 0:\n raise Exception(\"You can't win because the nim sum is zero.\")\n for i, val in enumerate(game_state):\n if (val ^ nim_sum) < val:\n return (i, val - (val ^ nim_sum))\n raise Exception(\"You can't win because no pile is suitable.\")", "def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n # if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next best move\r\n\r\n # get \"points\" on board. 
this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return (1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def play_move(self,state):\n #Keep asking for the next move until a valid move.\n while(True):\n childList = state.get_successors()\n print(\"Your possible moves:\")\n i = 0\n for c in childList:\n if i > 0 and i%4 == 0:\n print()\n print(c.get_action().ljust(10),end=\"\\t\");\n i += 1\n print()\n nextMove = input(\"What is your next move? 
\\ne.g.'F2-E3' or 'Quit'\\n\")\n #Check if the move is valid\n if nextMove.lower() == 'Quit'.lower():\n return None\n for c in childList:\n if c.get_action().upper() == nextMove.upper():\n return c\n # Move not possible \n print(\"Invalid move!! Please try again...\\n\")", "def start_state():\n return chess.Board()", "def next_turn(self): \n if (self.moves):\n self.board = self.select_move() \n self.moves = []\n self.roll = self.roll_dice()\n self.player = not self.player\n self.generate_valid_moves()", "def ai_3(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n moved = False\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n board[move] = board.cpiece_id\n moved = True\n break\n if not moved:\n board[choice(list(board.open_spots))] = board.cpiece_id\n board.cpiece_id = choose_none_winable_piece(board)\n else:\n board.cpiece_id = choose_none_winable_piece(board)\n\n if (board.cpiece_id is None) and not board.is_full:\n board.cpiece_id, _ = choice(list(board.unused_game_pieces))\n return board", "def decide(self, state: OthelloState, actions: list):\n # -------- TASK 2 ------------------------------------------------------\n # Your task is to implement an algorithm to choose an action form the\n # given `actions` list. You can implement any algorithm you want.\n # However, you should keep in mind that the execution time of this\n # function is limited. So, instead of choosing just one action, you can\n # generate a sequence of increasing good action.\n # This function is a generator. So, you should use `yield` statement\n # rather than `return` statement. To find more information about\n # generator functions, you can take a look at:\n # https://www.geeksforgeeks.org/generators-in-python/\n #\n # If you generate multiple actions, the last action will be used in the\n # game.\n #\n # Tips\n # ====\n # 1. During development of your algorithm, you may want to find the next\n # state after applying an action to the current state; in this case,\n # you can use the following patterns:\n # `next_state = current_state.successor(action)`\n #\n # 2. If you need to simulate a game from a specific state to find the\n # the winner, you can use the following pattern:\n # ```\n # simulator = Game(FirstAgent(), SecondAgent())\n # winner = simulator.play(starting_state=specified_state)\n # ```\n # The `MarkovAgent` has illustrated a concrete example of this\n # pattern.\n #\n # 3. You are free to choose what kind of game-playing agent you\n # implement. Some of the obvious approaches are the following:\n # 3.1 Implement alpha-beta (and investigate its potential for searching deeper\n # than what is possible with Minimax). Also, the order in which the actions\n # are tried in a given node impacts the effectiveness of alpha-beta: you could\n # investigate different ways of ordering the actions/successor states.\n # 3.2 Try out better heuristics, e.g. ones that take into account the higher\n # importance of edge and corner cells. 
Find material on this in the Internet.\n # 3.3 You could try out more advanced Monte Carlo search methods (however, we do\n # not know whether MCTS is competitive because of the high cost of the full\n # gameplays.)\n # 3.4 You could of course try something completely different if you are willing to\n # invest more time.\n #\n # GL HF :)\n # ----------------------------------------------------------------------\n\n # Replace the following lines with your algorithm\n best_action = actions[0]\n yield best_action", "def move(self, board):\n winning_move = self.find_winning_move(board)\n if winning_move != -1:\n return winning_move\n\n blocking_move = self.find_blocking_move(board)\n if blocking_move != -1:\n return blocking_move\n\n if board[4] == \"4\": # center square is open\n return 4\n else:\n return self.prng.choice(board.available())", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def get_greedy_ai_move(self, gamestate):\n multiplier = 1 if gamestate.ai_colour else -1\n best_moves = []\n best_score = self.BLACK_CHECKMATE\n for move in gamestate.get_valid_moves():\n # Execute the move\n (current_row, current_column), (new_row, new_column) = move\n current_piece = gamestate.board.board[current_row][current_column]\n piece_at_new_square = gamestate.board.board[new_row][new_column]\n\n gamestate.board.board[current_row][current_column] = None\n gamestate.board.board[new_row][new_column] = current_piece\n\n if piece_at_new_square:\n if piece_at_new_square.colour:\n gamestate.board.white_pieces.remove(piece_at_new_square)\n else:\n gamestate.board.black_pieces.remove(piece_at_new_square)\n\n # Evaluate the new board state\n score = (\n self.evaluate_board(\n gamestate.board,\n gamestate.white_checkmate,\n gamestate.black_checkmate,\n gamestate.stalemate,\n )\n * multiplier\n )\n\n # Undo the move\n gamestate.board.board[current_row][current_column] = current_piece\n gamestate.board.board[new_row][new_column] = piece_at_new_square\n\n if piece_at_new_square:\n if piece_at_new_square.colour:\n gamestate.board.white_pieces.append(piece_at_new_square)\n else:\n gamestate.board.black_pieces.append(piece_at_new_square)\n\n # Check if best move\n if score > best_score:\n best_score = score\n best_moves = [move]\n elif score == best_score:\n best_moves.append(move)\n\n # If there are no best moves, return a random move.\n if best_moves:\n return self.get_random_move(best_moves)\n return self.get_random_move(gamestate.get_valid_moves())", "def computer_turn(self):\r\n\r\n print(\r\n '\\nTURN: Computer -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=')\r\n\r\n # Scan through memory to see if the computer already knows a matching pair\r\n loc1, loc2 = self.computer_scan_memory()\r\n\r\n if loc1 and loc2: # when there is a pair inside the computer's memory\r\n x1, y1 = loc1\r\n x2, y2 = loc2\r\n\r\n # Check point\r\n assert x1 != None and y1 != None, 'x1 or y1 is None type'\r\n assert x2 != None and y2 != None, 'x2 or y2 is None type'\r\n\r\n choice1_key = self.state[x1, y1]\r\n choice2_key = self.state[x2, y2]\r\n else: # when there is no 
pair inside the computer's memory\r\n # Randomly select one card then scan memory\r\n x1, y1 = self.computer_random_select()\r\n choice1_key = self.state[x1, y1]\r\n\r\n # Scan through memory\r\n loc = self.computer_scan_memory(pick=choice1_key)\r\n\r\n if loc and (x1 != loc[0] or y1 != loc[1]): # there is a common value card in the computer's memory\r\n x2, y2 = loc # and that memory is not the same as the first choice\r\n\r\n # Check point\r\n assert x2 != None and y2 != None, 'x2 or y2 is None type'\r\n\r\n choice2_key = self.state[x2, y2]\r\n else: # There is no common value in the computer's memory\r\n while True: # select a card different from the first choice\r\n x2, y2 = self.computer_random_select()\r\n if x2 != x1 or y2 != y1:\r\n break\r\n choice2_key = self.state[x2, y2]\r\n\r\n print('First choice: {0} ({1}, {2})'.format(self.deck[choice1_key], x1, y1))\r\n print('Second choice: {0} ({1}, {2})'.format(self.deck[choice2_key], x2, y2))\r\n\r\n # Check if it is a match or not\r\n if self.check_card(self.deck[choice1_key], self.deck[choice2_key]):\r\n print('MATCH')\r\n # Replace the corresponding cards in the remaining inventory and current state with -1\r\n self.remaining[choice1_key] = -1\r\n self.remaining[choice2_key] = -1\r\n self.state[x1, y1] = -1\r\n self.state[x2, y2] = -1\r\n self.computer_cards += 2 # the computer gets 2 cards\r\n self.bin.append([x1, y1]) # move the location of the card to the already-taken bin\r\n self.bin.append([x2, y2])\r\n self.forget_memory(choice1_key) # remove from computer's memory\r\n self.forget_memory(choice2_key)\r\n self.match = 0 # The computer will continue to choose cards\r\n else:\r\n print('NOT a match')\r\n # Add these cards to the computer's memory\r\n self.computer_memory[choice1_key] = [x1, y1]\r\n self.computer_memory[choice2_key] = [x2, y2]\r\n self.match = 1 # The player's turn\r", "def set_moves(difficulty: int) -> int:\n if difficulty == 0:\n return 5\n elif difficulty == 1:\n return 10\n elif difficulty == 2:\n return 25\n elif difficulty == 3:\n return 50\n elif difficulty == 4:\n return 100\n else:\n return 150", "def get_move(self, board, possible_moves, player_1_or_2):\n\n # Given a Tic-Tac-Toe 3x3 board position where 1 => current player's square,\n # -1 => opponent's square, 0 => blank square,\n # this will return the current player's best move [as the x and y indexes into \n # the board array.]\n # The second input parameter, player_1_or_2, is 1 or -1 to indicate which player's\n # move it is. \n \n print('RL ~ Current player 1 or 2 (= -1):', player_1_or_2)\n \n print('RL ~ Current board: ')\n print(board)\n \n print('RL ~ possible_moves:', possible_moves)\n\n next_move = () \n\n # This will be the best move i.e. 
the move with the current\n # value of highest winning probability except when it is making exploratory\n # (as opposed to greedy) moves.\n\n next_move = self.board_position_states.get_next_move(board, possible_moves, self.current_player)\n\n next_move_location_tuple = possible_moves[next_move]\n board[next_move_location_tuple] = self.current_player\n\n self.list_board_positions_moved_to.append(board.copy()) # This board that we are\n # appending here could be changed by the next line of code, for example.\n # Hence we need to make a copy\n\n board[next_move_location_tuple] = 0 # undo the move in case it affects the calling method.\n\n return next_move", "def result(self, state, action):\r\n\r\n sc = copy.deepcopy(state)\r\n new_piece, player = self.new_or_old_piece(state)\r\n current_player, to_action, from_action = action\r\n\r\n # Make the move\r\n sc[to_action[0]][to_action[1]] = current_player\r\n\r\n # There can't be more than 6 pieces in any state.\r\n if not new_piece:\r\n # Now making from place as null again\r\n sc[from_action[0]][from_action[1]] = '-'\r\n\r\n return sc", "def takeStrategicMove():\r\n\tglobal move1, move2\r\n\r\n\tif move1==0 or move2==0:\r\n\t\tif validMove(1):\r\n\t\t\treturn 1\r\n\t\telif validMove(5):\r\n\t\t\treturn 5\r\n\telif winningMove():\r\n\t\treturn winningMove()\t\t\r\n\telif blockingMove():\r\n\t\treturn blockingMove()\r\n\telse:\r\n\t\treturn takeNaiveMove()", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def get_best_moves():\n game_data = set()\n\n def next_move(game):\n winner = game.get_winner()\n if winner is not None:\n return {winner}\n next_wins = set()\n future_wins = set()\n future_draws_and_wins = set()\n future_draws = set()\n results = set()\n for row, col in game.valid_spaces():\n next_game = game.move(row, col)\n next_results = next_move(next_game)\n results |= next_results\n if game.turn == 2 and next_game.get_winner() == 2:\n # If Player 2 can win with their move, reject this line\n return {2}\n elif game.turn == 1 and next_game.get_winner() == 1:\n # If Player 1 can win with their move, only accept these lines\n next_wins.add((tuple([tuple(row) for row in game.board]), row * 3 + col))\n elif game.turn == 1:\n if next_results == {1}:\n # Player 1 will only win in this future\n future_wins.add((tuple([tuple(row) for row in game.board]), row * 3 + col)) \n elif next_results == {0, 1}:\n # Player 1 could win or draw in this future\n future_draws_and_wins.add((tuple([tuple(row) for row in game.board]), row * 3 + col))\n elif next_results == {0}:\n # Player 1 could only draw in this future\n future_draws.add((tuple([tuple(row) for row in game.board]), row * 3 + col))\n # We only accept the draws 
if we don't have any just wins\n if game.turn == 2:\n return results\n\n if next_wins:\n game_data.update(next_wins)\n return {1}\n elif future_wins:\n game_data.update(future_wins)\n return {1}\n elif future_draws_and_wins:\n game_data.update(future_draws_and_wins)\n return {0, 1}\n elif future_draws:\n game_data.update(future_draws)\n return {0}\n return set()\n\n next_move(TTTGame())\n next_move(TTTGame(turn=2))\n return tuple(game_data)", "def choose_action(self, board):\n options = board.empty_cells\n # to allow exploration, have a small probability of a random move\n p_random = random.random()\n # if the state is not in the table add it\n if (self.sign, board.state) not in self.Q_table.keys() or p_random < self.epsilon:\n values = {}\n for option in options:\n values[option] = random.random()\n self.Q_table[(self.sign, board.state)] = values\n self.action = random.choice(options)\n else:\n values = self.Q_table[(self.sign, board.state)]\n action = max(values, key=values.get)\n self.action = action\n\n # decrease exploration after each action\n if self.epsilon > 0:\n self.epsilon -= 0.0001\n\n return self.action", "def ai_1(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n return update_board_then_give_random(board, move)\n board.ai_random_move()\n return board", "def computer_play( game ):\n\n grid = game.get_grid()\n\n diag = game.checkDiagonals()\n row = game.checkRows()\n column = game.checkColumns()\n\n if isinstance(diag, tuple):\n \n for x in diag[1]:\n try:\n x = int(x)\n print(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue\n\n elif isinstance(row, tuple):\n\n for x in row[1]:\n try:\n x = int(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue\n\n elif isinstance(column, tuple):\n\n for x in column[1]:\n try:\n x = int(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue \n\n for x in list(range(1,10)):\n if game.set_mark('O', x):\n return\n else:\n continue", "def get_available_moves(self, board):\n pass", "def get_legal_moves(self, player: int) -> np.ndarray:\n stage2 = self.is_stage2()\n action_mask = np.zeros((24, 5, 25), dtype=bool)\n # if stage 1 add set options\n array_board = np.array(self.board)\n if not stage2:\n legal_pos = np.where(array_board == 0)[0]\n for pos in legal_pos:\n if self.is_mill(player, pos, self.board): # current selection completes a mill\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if\n not self.is_mill(-player, opp_p, self.board)] # can't remove opponent in mill\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[pos, -1, opp_pos] = True\n else:\n action_mask[pos, -1, -1] = True # place piece on board\n else:\n from_pos_cands = np.where(array_board == player)[0]\n for from_pos in from_pos_cands:\n mill_cands = [(orient, adj) for orient, adj in enumerate(self.adjacent[from_pos]) if\n adj is not None and self.board[adj] == 0] # TODO added not, need to validate\n if_played_board = self.board.copy()\n if_played_board[from_pos] = 0\n for (orient, adj) in mill_cands:\n if self.is_mill(player, adj, if_played_board):\n opp_pos = np.where(array_board == -player)[0]\n opp_pos = [opp_p for opp_p in opp_pos if not self.is_mill(-player, opp_p, 
if_played_board)]\n if len(opp_pos) == 0: # exception for the case where all opponents pieces are in mills\n opp_pos = np.where(array_board == -player)[0]\n action_mask[from_pos, orient, opp_pos] = True\n else:\n action_mask[from_pos, orient, -1] = True\n\n return action_mask", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, -1, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 5, False, {}\n else:\n return state, -100, True, {}", "def player(board):\n # Check if board is in initial_state\n if board == initial_state():\n return X \n else:\n # Keep track of how many moves each player took\n x_moves = 0\n o_moves = 0\n # Loop over board list and count how many XO moves\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_moves += 1\n elif board[i][j] == O:\n o_moves += 1\n # If X has more moves its O's turn otherwise its X's turn\n return O if x_moves > o_moves else X", "def handle_get_action(self, state):\n\n # This is an example player who picks random moves. 
REMOVE THIS WHEN YOU ADD YOUR OWN CODE !!\n\n #next_move = tuple(self.pick_random_free_cell(\n # state, size=int(math.sqrt(len(state)-1))))\n #############################\n #\n #\n NN_state = self.server_state_to_NN_state(state)\n predictions = self.policy_network.predict([[NN_state]])\n next_move = np.argmax(predictions)\n self.game.set_state(NN_state,1)\n legal_actions = self.game.get_legal_actions()\n if next_move not in legal_actions:\n next_move = np.random.choice(legal_actions,1)\n next_move = self.action_to_tuple_action(next_move)\n\n #\n # next_move = ???\n ##############################\n return next_move", "def computerTurn(board):\n\n i, j = bestMove(board)\n\n board[i][j] = computer\n pygame.time.delay(500)\n updateWindow(i, j, computer)", "def select_move_minimax(board, color):\n best_utility = -math.inf\n new_color = 1 if color == 2 else 2\n possible_moves = get_possible_moves(board, color)\n best_move = 0,0\n if len(possible_moves) > 0:\n best_move = possible_moves[0]\n sorted_states_list = []\n for move in possible_moves:\n new_board = play_move(board, color, move[0], move[1])\n sort_utility = compute_utility(new_board, color)\n heappush(sorted_states_list, (sort_utility, new_board, move))\n\n sorted_states = [x[1] for x in sorted_states_list]\n moves = [x[2] for x in sorted_states_list]\n index = 0\n for board_state in sorted_states:\n if board_state in caching_states:\n\n utility = caching_states[board_state]\n else:\n utility = minimax_min_node(board_state, new_color)\n caching_states[board_state] = utility\n\n if utility > best_utility:\n best_move = moves[index]\n best_utility = utility\n index += 1\n\n return best_move", "def computer_move():\n\tmove = random.choice(moves)\n\tprint \"Computer's move is %s\" % move\n\treturn move", "def actions(self, state):\n\n actions = []\n \n # if its player 1's turn\n if state.maxs_turn==True:\n # look through all the squares on the board\n for coords in state.gameState:\n # if its a rebel append allowable move and attack actions\n if state.gameState[coords]=='R':\n if state.gameState[(coords[0]-1, coords[1])]== ' ':\n actions.append(\"Move: Rebel @ {} --> {}\".format(coords, (coords[0]-1, coords[1])))\n if ((coords[0]-1, coords[1]+1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]+1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]+1)))\n if ((coords[0]-1, coords[1]-1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]-1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]-1)))\n \n # if its a jedi append allowable move and attack actions\n elif state.gameState[coords]=='J':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n # walk in each direction until reaching the edge of board, or a player\n while (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Jedi @ {} --> {}\".format(coords, coord))\n coord = (coord[0]+direction[0], coord[1]+direction[1])\n # if we ran into a sith we can attack\n if (coord in state.gameState) and (state.gameState[coord] == 'S'):\n actions.append(\"Attack: Jedi @ {} --> Sith @ {}\".format(coords, coord))\n \n else:\n for coords in state.gameState:\n if state.gameState[coords]=='S':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n if (coord in state.gameState) 
and (state.gameState[coord] == ' '):\n actions.append(\"Move: Sith @ {} --> {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'R'):\n actions.append(\"Attack: Sith @ {} --> Rebel @ {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'J'):\n actions.append(\"Attack: Sith @ {} --> Jedi @ {}\".format(coords, coord))\n \n\n\n if len(actions)==0:\n actions.append(\"Pass\")\n \n actions.sort()\n \n return actions", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n 
valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n\n # print the valid moves on board for current player\n move = board.last_move\n\n # enemy agent's id\n enemy = self.id % 2 + 1\n\n value = self.evaluateRows(board, enemy) + self.evaluateCols(board, enemy) + self.evaluateBackwardDiagonals(board, enemy) + self.evaluateForwardDiagonals(board, enemy)\n return value", "def result(board, action):\n some_board = deepcopy(board)\n if terminal(some_board):\n return some_board\n if not action[0] in range (0,3) or not action[1] in range(0,3):\n raise NotImplementedError\n elif some_board[action[0]][action[1]] != EMPTY:\n raise NotImplementedError\n else:\n some_board[action[0]][action[1]] = player(some_board)\n return some_board", "def main():\n\n print('R-In-A-Row')\n print()\n\n while True:\n if play == 'human vs human':\n human1Tile, human2Tile = enterHuman1Tile()\n\n turn = whoGoesFirst()\n print('The %s player will got first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'human vs computer':\n human1Tile, computer1Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'computer vs computer':\n computer1Tile, computer2Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' % (turn))\n\n\n while True:\n if play == 'human vs human':\n if turn == 'human1':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n\n makeMove(mainBoard, human1Tile, move)\n\n if isWinner(mainBoard, human1Tile):\n winner = 'human1'\n\n break\n turn = 'human2'\n if turn == 'human2':\n drawBoard(mainBoard)\n move2 = getHuman2Move(mainBoard)\n makeMove(mainBoard, human2Tile, move2)\n if isWinner(mainBoard, human2Tile):\n winner = 'human2'\n break\n turn = 'human1'\n\n elif play == 'human vs computer' :\n if turn == 'human':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n makeMove(mainBoard, human1Tile, move)\n if isWinner(mainBoard, human1Tile):\n winner = 'human'\n\n break\n turn ='computer'\n\n elif turn == 'computer':\n drawBoard(mainBoard)\n print('The computer is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer'\n break\n turn = 'human'\n elif play == 'computer vs computer':\n if turn == 'computer1':\n drawBoard(mainBoard)\n print('computer1 is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer1'\n break\n turn = 'computer2'\n elif turn == 'computer2':\n drawBoard(mainBoard)\n print('computer2 is thinking...')\n move = getComputer2Move(mainBoard, computer2Tile)\n makeMove(mainBoard, computer2Tile, move)\n if isWinner(mainBoard, computer2Tile):\n winner = 'computer2'\n break\n turn = 'computer1'\n\n\n if isBoardFull(mainBoard):\n winner = 'tie'\n break\n\n drawBoard(mainBoard)\n print('Winner is: %s' % winner)\n if not playAgain():\n break", "def __init__(self, action, pre_state, action_player=2, next_player=1, has_color=True):\n\n self.action = action\n # Initial state\n if pre_state is None:\n self.action_player = action_player\n self.player = next_player\n self.available_moves = set()\n for x in range(1, 16):\n for y in range(1, 16):\n if (x, y) != action:\n self.available_moves.add((x, y))\n self.occupied = {}\n if next_player == 1:\n self.occupied[action] = 2\n else:\n 
self.occupied[action] = 1\n self.top = action[0]\n self.bottom = action[0]\n self.left = action[1]\n self.right = action[1]\n global use_color\n use_color = has_color\n else:\n self.action_player = pre_state.player\n if pre_state.player == 1:\n self.player = 2\n else:\n self.player = 1\n self.available_moves = set(pre_state.available_moves)\n self.available_moves.remove(action)\n self.occupied = dict(pre_state.occupied)\n self.occupied[action] = pre_state.player\n # Set the most top, bottom, left, and right index for the state.\n if action[0] < pre_state.top:\n self.top = action[0]\n else:\n self.top = pre_state.top\n if action[0] > pre_state.bottom:\n self.bottom = action[0]\n else:\n self.bottom = pre_state.bottom\n if action[1] < pre_state.left:\n self.left = action[1]\n else:\n self.left = pre_state.left\n if action[1] > pre_state.right:\n self.right = action[1]\n else:\n self.right = pre_state.right\n self.pre_state = pre_state\n if self.action_player == 1:\n self.value = evaluate_state(self)\n else:\n self.value = -evaluate_state(self)", "def get_next_moves1(self):\n moves = []\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n if self.board[i][j] == \"\":\n next_board = copy.deepcopy(self.board)\n next_board[i][j] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n next_turn = get_opponent(self.turn)\n moves.append(DotsAndBoxesState(next_board, next_turn))\n return moves", "def pick_move(self, rack):\n\n global WIDTH\n\n WIDTH = len(rack)\n\n list_rack = [list(slot) for slot in rack] #untuple-ize\n current = State(list_rack, self.player_id) #make state of current rack\n\n options = []\n #look at all options at first ply layer, which will have recursed down to the bottom\n for c in range(WIDTH): #for every column in the board\n\n #make a state based on the move at column c\n attempt = current.simul_move(c, self.player_id)\n\n if attempt is not None: #if this produced a move\n #next player gets to move\n if self.player_id == 1: next_player = 2\n else: next_player = 1\n attempt_score = self._find_move(attempt, 1, self.difficulty_level, next_player)\n\n #put this move and its column into a list\n options.append((attempt_score, c))\n\n #pick the maximum\n selected = max(options)\n #pick move associated with maximum\n return selected[1]", "def mm_move(board, player):\n result = board.check_win() # get result of the current board\n if result == None:\n move_list = board.get_empty_squares() # get the tree branches and possible next moves\n best = (None, (-1, -1))\n for step in move_list:\n bd_clone = board.clone()\n bd_clone.move(step[0], step[1], player) #make a move on a cloned board\n next_player = provided.switch_player(player)\n next_score = mm_move(bd_clone, next_player) #make a recursive call to mm_move() pasing the cloned board and the 'other' player\n if player == 3: #if it is oppo O--min\n if best[0] == None or (next_score[0] < best[0]):\n best = (next_score[0], step)\n #print best\n elif player ==2: #if it is X--max\n if best[0] == None or (next_score[0] > best[0]):\n best = (next_score[0], step)\n return best\n else:\n return SCORES[result], (-1, -1)", "def ai_strategy3(gstate: TicTacToe, game_tree):\n status, player = gstate.game_status\n\n if status != \"turn\":\n logging.warning(\"Game status = %s. 
No move needed.\", status)\n return None\n\n win, lose = (-1, 1) if player == \"1\" else (1, -1)\n\n move_vals = {\n move: win * game_tree[state][\"value\"] for move, state in gstate.next_moves.items()\n }\n max_val = max(move_vals.values())\n\n if max_val == 1:\n # ai_strategy2 can handle \"won\" states\n return ai_strategy2(gstate, game_tree)\n\n else:\n ok_moves = [move for move, val in move_vals.items() if val == max_val]\n\n move_vals = {\n move: collections.Counter(\n [\n game_tree[state2][\"value\"] for state2 in game_tree[gstate.next_moves[move]][\"explored\"]\n ]\n )\n for move in ok_moves\n }\n\n move_eval = {\n move: val_ctr.get(win, 0) / val_ctr.get(lose, 0.5)\n for move, val_ctr in move_vals.items()\n }\n max_win_pct = max(move_eval.values())\n good_moves = [move for move, win_pct in move_eval.items() if win_pct ==\n max_win_pct]\n\n move = random.choice(good_moves)\n\n all_ct = sum(move_vals[move].values())\n win_ct = move_vals[move].get(win, 0)\n logging.debug(\n \"move: %i; value: %i, win %%: %.1f%%, moves: %s\\n\",\n move, win * game_tree[gstate.state][\"value\"],\n win_ct / max(all_ct, 0.1) * 100, move_vals\n )\n\n return move", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, 0, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 0, False, {}\n else:\n return state, -100, True, {}", "def available_moves(board_state):\n for x, y in itertools.product(range(len(board_state)), range(len(board_state[0]))):\n if board_state[x][y] == 0:\n yield (x, y)", "def ai_turn(c_choice, h_choice, xi, yi):\r\n depth = len(empty_cells(board))\r\n if depth == 0 or game_over(board):\r\n return\r\n\r\n clean()\r\n print(f'Computer turn [{c_choice}]')\r\n render(board, c_choice, h_choice)\r\n\r\n if depth == 70:\r\n x = choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n y = choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n else:\r\n move = minimax(board, depth, COMP)\r\n x, y = move[0], move[1]\r\n \r\n set_move(x, y, 1, xi, yi)\r\n time.sleep(1)", "def minimax(board):\n\n if len(actions(board)) == 9:\n action = actions(board)\n return action[random.randint(0, len(actions(board)) - 1)]\n\n if terminal(board):\n return None\n\n best_move = None\n\n if player(board) == X:\n best_score = -1\n else:\n best_score = 1\n\n for action in actions(board):\n if player(board) == X:\n score = minimize(result(board, action))\n\n if score >= best_score:\n best_move = action\n best_score = score\n else:\n score = maximize(result(board, action))\n\n if score <= best_score:\n best_move = action\n best_score = score\n\n return best_move", "def select_move(self, game_state):\n raise NotImplementedError()", "def result(board, action):\n newstate = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n rows = 3\n columns = 3\n for i in range(rows):\n for j in range(columns):\n newstate[i][j] = board[i][j]\n# print(newstate)\n# print(action)\n ival = action[0]\n jval = action[1]\n if ival > 2:\n raise Exception(\"invalid i action\")\n if jval > 2:\n raise Exception(\"invalid j action\")\n if board[ival][jval] != EMPTY:\n raise Exception(\"invalid action\")\n else:\n if player(board) == X:\n newstate[ival][jval] = X\n else:\n newstate[ival][jval] = O\n return newstate\n\n #raise NotImplementedError", "def ai_2(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is None:\n board.cpiece_id = 
choose_none_winable_piece(board)\n else:\n board[choice(list(board.open_spots))] = board.cpiece_id\n board.cpiece_id = choose_none_winable_piece(board)\n\n if (board.cpiece_id is None) and not board.is_full:\n board.cpiece_id, _ = choice(list(board.unused_game_pieces))\n return board", "def __init__(self, n: int):\n        self.rows = [[n, -1] for _ in range(n)]\n        self.cols = [[n, -1] for _ in range(n)]\n        self.diag = [[n, -1], [n, -1]] # 0 for normal, 1 for anti\n        \n    def move(self, row: int, col: int, player: int) -> int:\n        r1, r2 = self.check(self.rows, row, player), self.check(self.cols, col, player)\n        r3, r4 = 0, 0\n        if(row == col):\n            r3 = self.check(self.diag, 0, player)\n        if(row + col == len(self.rows)-1):\n            r4 = self.check(self.diag, 1, player)\n        \n        return max(r1,r2,r3,r4)\n    def check(self, arr, i, player):\n        arr[i][0] -= 1\n        \n        if(arr[i][1] == -1):\n            arr[i][1] = player\n        elif(arr[i][1] != player):\n            arr[i][1] = 0\n        \n        if(arr[i][0] == 0 and arr[i][1] != 0):\n            return player\n        return 0\n        \n        \"\"\"\n       Player {player} makes a move at ({row}, {col}).\n       @param row The row of the board.\n       @param col The column of the board.\n       @param player The player, can be either 1 or 2.\n       @return The current winning condition, can be either:\n               0: No one wins.\n               1: Player 1 wins.\n               2: Player 2 wins.\n       \"\"\"\n        ", "def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0:\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1:\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2:\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3:\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. Can't find heading\")\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) \n for goal in self.goals:\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. 
Can't determine action\")", "def _makeAMove(self, prev_move, board: str) -> int:\n\n (myWins, otherWins, _, _) = self.winsBlocksForks(board)\n move = choice([self.findEmptyCell(board, myWin) for myWin in myWins] if myWins else\n [self.findEmptyCell(board, otherWin) for otherWin in otherWins] if otherWins else\n list(self.otherMove(board, emptyCellsCount(board)))\n )\n return move", "def _policy(self, gameboard):\r\n valid_moves = self._all_valid_moves(gameboard)\r\n _reflex_ = Reflex(self.color)\r\n best_move = None\r\n moves = []\r\n \r\n # step 1, check going to win\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_going_to_win(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n\r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n \r\n # step 2, check opponent 4\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_._alter_check_opponent_4(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n \r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 3, check opponent 3\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_opponent_3(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n \r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 4, winning blocks\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_winning_blocks(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n\r\n if len(moves) > 0:\r\n moves = list(set(moves))\r\n moves.sort(key=lambda x: x[2], reverse=True)\r\n max_count = moves[0][2]\r\n new_moves = []\r\n\r\n for t in moves:\r\n if t[2] < max_count:\r\n break\r\n else:\r\n new_moves.append((t[0], t[1]))\r\n\r\n moves = new_moves.copy()\r\n\r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 5, random pick one\r\n idx = np.random.choice(len(valid_moves), 1)[0]\r\n return valid_moves[idx]" ]
[ "0.7180501", "0.7104202", "0.7091236", "0.7071786", "0.7070496", "0.7067395", "0.70613885", "0.69441974", "0.69415534", "0.692273", "0.69195575", "0.6913044", "0.69035596", "0.69028074", "0.68817395", "0.6827539", "0.68212116", "0.6801467", "0.67608017", "0.6755071", "0.67443556", "0.6742645", "0.6727172", "0.66985184", "0.6695921", "0.6690848", "0.6675819", "0.66592467", "0.6643143", "0.6641673", "0.6633428", "0.66228676", "0.66204363", "0.6594802", "0.6589713", "0.6589126", "0.65843844", "0.65758175", "0.65719783", "0.65614015", "0.656008", "0.6548624", "0.65336514", "0.6515121", "0.65136766", "0.6513122", "0.650992", "0.6505521", "0.6502633", "0.6502011", "0.6501499", "0.64993113", "0.6491775", "0.6485591", "0.6479415", "0.6474032", "0.6472749", "0.64669275", "0.64638513", "0.6445481", "0.64453197", "0.64360946", "0.6423139", "0.64156896", "0.64141595", "0.64134645", "0.6394216", "0.6386983", "0.6384548", "0.6381603", "0.63741744", "0.6373862", "0.6372858", "0.63644385", "0.63623965", "0.6362235", "0.63610023", "0.6360621", "0.6355915", "0.6352218", "0.63469595", "0.6345904", "0.6340968", "0.6335658", "0.63342816", "0.6333623", "0.63334966", "0.6332721", "0.63321644", "0.63278425", "0.6327298", "0.6325769", "0.63243604", "0.6323903", "0.63122994", "0.631151", "0.6306591", "0.6303942", "0.62995195", "0.62992954" ]
0.66842777
26
Creates a list of victory conditions based on the size of the board
def create_victory_conditions(size): #Written by Cody West. Not used in current program, could be used to make boards of different sizes
    victory_conditions = []
    for i in range(size):
        horizontal_victory = []
        for n in range(size):
            horizontal_victory.append(size*i+n)
        victory_conditions.append(horizontal_victory)
    for i in range(size):
        vertical_victory = []
        for n in range(size):
            vertical_victory.append(size*n+i)
        victory_conditions.append(vertical_victory)
    diagonal_victory_1 = []
    for i in range(size):
        diagonal_victory_1.append(size*i+i)
    victory_conditions.append(diagonal_victory_1)
    diagonal_victory_2 = []
    for i in range(size):
        diagonal_victory_2.append((i+1)*size-(i+1))
    victory_conditions.append(diagonal_victory_2)
    return(victory_conditions)
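A quick worked example, not part of the original row: assuming the function above is used as defined, a 3x3 board should yield the familiar eight tic-tac-toe lines (the expected value below is derived by hand from the code).

# Hand-derived sanity check for create_victory_conditions (assumed importable as-is)
conditions = create_victory_conditions(3)
assert conditions == [
    [0, 1, 2], [3, 4, 5], [6, 7, 8],   # rows
    [0, 3, 6], [1, 4, 7], [2, 5, 8],   # columns
    [0, 4, 8],                         # main diagonal
    [2, 4, 6],                         # anti-diagonal
]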
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))", "def create_board(self, size, cars):\n board = [[None for i in range(size)] for j in range(size)]\n\n for car in cars.values():\n for i in range(car.length):\n if car.orientation == 'H':\n board[car.row][car.col + i] = car\n if car.orientation == 'V':\n board[car.row + i][car.col] = car\n\n return board", "def board(constraints):\n rows = len(constraints[0])\n columns = len(constraints[1])\n board = []\n for i in range(rows):\n board.append([Empty for k in range(columns)])\n return board", "def __checkvictory__(self,playerchar):\n\t\tvictory = False\n\t\tboardx = deepcopy(self.board)\n\t\trow = 5\n\t\tcolumn = 6\n\t\tstarburst_bag = []\n\t\tcats_game = True\n\t\tfor a in range(row+1):\n\t\t\tfor b in range(column+1):\n\t\t\t\tstarburst = []\n\t\t\t\tstarburst.append((a,b))\n\t\t\t\t\n\t\t\t\tif self.__checkplace__(a,b) is True:\n\t\t\t\t\tcats_game = False\n\t\t\t\t\tcontinue\n\t\t\t\telif self.__checkplace__(a,b) == playerchar:\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a-starburst[1] < 0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a-starburst[1],b) == playerchar:\n\t\t\t\t\t\t\tstarburst[1] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a-starburst[2] < 0:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b+starburst[2] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a-starburst[2],b+starburst[2])\\\n\t\t\t\t\t\t == playerchar:\n\t\t\t\t\t\t\tstarburst[2] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif b+starburst[3] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a,b+starburst[3]) == playerchar:\n\t\t\t\t\t\t\tstarburst[3] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tstarburst.append(1)\n\t\t\t\t\t\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tif a+starburst[4] > 5:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif b+starburst[4] > 6:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif self.__checkplace__(a+starburst[4],b+starburst[4])\\\n\t\t\t\t\t\t== playerchar:\n\t\t\t\t\t\t\tstarburst[4] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\tstarburst_bag.append(starburst)\n\t\t\n\t\tfor starburst in starburst_bag:\n\t\t\t\n\t\t\ta = starburst[0][0]\n\t\t\tb = starburst[0][1]\n\t\t\t\n\t\t\tif starburst[1] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[1]):\n\t\t\t\t\tboardx[a-i][b] = boardx[a-i][b].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[2] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[2]):\n\t\t\t\t\tboardx[a-i][b+i] = boardx[a-i][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[3] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[3]):\n\t\t\t\t\tboardx[a][b+i] = boardx[a][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\tif starburst[4] > 3:\n\t\t\t\tvictory = True\n\t\t\t\tfor i in range(starburst[4]):\n\t\t\t\t\tboardx[a+i][b+i] = boardx[a+i][b+i].\\\n\t\t\t\t\treplace(playerchar,playerchar.upper())\n\t\t\t\n\t\tif cats_game:\n\t\t\treturn None\n\t\tif victory:\n\t\t\treturn boardx\n\t\telse:\n\t\t\treturn False", "def __init__(self, size):\n\t\tself.size = size\n\t\tself.board = []\n\t\tnew = []\n\t\tfor i in range(0, size, 
1):\n\t\t\tfor j in range(0, size, 1):\n\t\t\t\tnew.append(0)\n\t\t\tself.board.append(new)\n\t\t\tnew = []", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def generateQueenAttacks(boardsize, pos):\n assert isinstance(pos, Position) and validatePosition(boardsize, pos)\n attackList = []\n startPos = Position(pos.x, pos.y)\n \n def addAttackList(pos):\n for attacked in attackList:\n if pos.compare(attacked):\n return\n attackList.append(Position(pos.x, pos.y))\n\n #positive x\n while pos.x < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #positive y\n while pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative x\n while pos.x >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative y\n while pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x +y left bottom\n while pos.x >= 0 and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x -y left top\n while pos.x >= 0 and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x +y right bottom\n while pos.x < boardsize and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x -y right top\n while pos.x < boardsize and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n\n return attackList", "def check_victory(board):\n\n for idx in range(3):\n if board[idx][0] != ' ' and board[idx][0] == board[idx][1] == board[idx][2]:\n # This checks if all items in each horizontal row is complete.\n print('Victory to ' + board[idx][0])\n return True\n elif board[0][idx] != ' ' and board[0][idx] == board[1][idx] == board[2][idx]:\n # This checks if all the items in each vertical column is complete.\n print('Victory to ' + board[0][idx])\n return True\n\n if board[0][0] != ' ' and board[0][0] == board[1][1] == board[2][2]:\n # This checks if the left to right diagonal is complete.\n print('Victory to ' + board[0][0])\n return True\n elif board[2][0] != ' ' and board[2][0] == board[1][1] == board[0][2]:\n # This checks if the right to left diagonal is complete.\n print('Victory to ' + board[2][0])\n return True\n\n return False", "def create_board(self, size):\n self.board = [\n [FieldState.EMPTY for _ in range(size)]\n for _ in range(size)\n ]", "def buildBoard(self, n):\n\n boardDict = []\n diagCount = 0\n\n for i in range(n):\n self.rows[i] = [True, \"\", 0] #homogenous, X/O, count of X's/O's\n self.cols[i] = [True, \"\", 0]\n for j in range(n):\n\n# Is there a faster way to make this array than nested for loops?\n boardDict.append((i,j))\n return boardDict", "def 
makeBoard(n):\n valid_positions = []\n for i in range(0, n):\n for j in range(0,n):\n valid_positions.append(Position(i,j))\n return valid_positions", "def create_board(board_size):\n board = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append('-')\n board.append(row)\n return board", "def initializer():\n size: int = int(input(\"Enter a number for the board size: \"))\n board: List[List[str]] = [[random.choice([\"X\", \"O\", \" \"]) for x in range(size)] for y in\n range(size)]\n return size, board", "def initialise(length, runs):\n # The first run of fix_row() or fix_col() will find this anyway. But this is faster\n arr = [EITHER] * length\n free_whites = length - sum(runs) - (len(runs) - 1) # remaining whites to place\n j = 0 # current position\n for x in runs:\n if x > free_whites: # backfill s \n for c in range(j + free_whites, j + x): \n arr[c] = BLACK \n if (free_whites == 0) and (j + x < length):\n arr[j + x] = WHITE # can place a white too\n j += x + 1\n return arr", "def get_complete_3D_action_list():\n # Action is a tuple tile_type,nbr_to_move, row_to_move_to\n # 5 * 5 * 6 = 150 possibilities\n actions = list()\n for tt in range(0,5):\n for i in range(1,6): # the final value represents 5 or more\n for row in range(0,6):\n actions.append((tt,i,row))\n return actions", "def __init__(self, board_dim= DEFAULT_DIM):\r\n self.width = board_dim\r\n self.height = board_dim\r\n\r\n self.grid = np.array([[' '] * self.width for i in range(self.height)])\r\n self.num_checkers = 0 # keeps track of how many checkers have been added\r\n\r\n self.available_moves = [(row, col) for row in range(self.height) for col in range(self.width)]\r\n\r\n # Specify the winning condition based on the board's dimension\r\n if (self.width < 5):\r\n self.win_condition = self.width\r\n else:\r\n self.win_condition = 5", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def gameOfLife(self, board: List[List[int]]) -> None:\n r, c = len(board), len(board[0])\n # 下面两行做zero padding\n board_exp = np.array([[0 for _ in range(c + 2)] for _ in range(r + 2)])\n board_exp[1:1 + r, 1:1 + c] = np.array(board)\n print(board_exp)\n # 设置卷积核\n kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n # 开始卷积\n for i in range(1, r + 1):\n for j in range(1, c + 1):\n # 统计细胞周围8个位置的状态\n temp_sum = np.sum(kernel * board_exp[i - 1:i + 2, j - 1:j + 2])\n # 按照题目规则进行判断\n if board_exp[i, j] == 1:\n if temp_sum < 2 or temp_sum > 3:\n board[i - 1][j - 1] = 0\n else:\n if temp_sum == 3:\n board[i - 1][j - 1] = 1", "def AI(current_board, AI_symbol, opponent_symbol, difficulty): #Written by Cody West\n victory_conditions = [[0,4,8],[2,4,6],[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8]] #Establishes victory conditions to be checked\n if difficulty >= 2: #If difficulty is at least 2\n ## Cody -- you could just write:\n ## for slots in victory_conditions\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions ## Oops\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list ## Oops \n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n ## This you can do even more efficiently using a beautiful syntax called\n ## \"list comprehension\" which 
entered python some years ago -- watch\n ## me do it in one line:\n ## check = [current_board[s] for s in slots]\n if check.count(AI_symbol)==2 and check.count(\" \")==1: #If there are any rows where the AI has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n ## Oops -- you repeat the code again here for no reason\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(opponent_symbol)==2 and check.count(\" \")==1: #If there are any rows where the opponent has two symbols and there's one empty spot\n return(slots[check.index(\" \")]) #Return the empty spot from that row\n if difficulty >= 3: #If difficulty is at least 3\n ## It looks like you're doing an identical loop here -- I\n ## wonder why you don't move the if statement inside the loop\n ## -- I believe that would significantly shorten your code\n for n in range(len(victory_conditions)): #For each victory condition in victory_conditions\n slots = victory_conditions[n] #Take the victory conditions and put them in a new list\n check = [] #Creates empty folder called check\n for i in range(len(slots)): #For each spot in slots\n check.append(current_board[slots[i]]) #Add the corresponding spot from the current board to check\n if check.count(AI_symbol)==1 and check.count(\" \")==2: #If there are any rows where the AI has one symbol and there's two empty spots\n if check[0] == \" \": #If the first slot from check is empty\n return(slots[0]) #Return the first slot\n else: \n return(slots[2]) #Return the third slot\n if difficulty == 4: #If difficulty is 4\n if current_board[4] == \" \": #If the center is empty\n return(4) #Take the center\n elif current_board[0] or current_board[2] or current_board[6] or current_board[8] == \" \": #Else, if a corner is open\n corners = 2*random.randint(0,4) #Selects a random corner (or center, which will reject)\n while current_board[corners] != \" \": #Until the corner selected is empty\n corners = 2*random.randint(0,4) #Select a new corner or center\n return(corners) #Return empty corner\n else:\n sides = 2*random.randint(0,3)+1 #Selects a side\n while current_board[sides] != \" \": #Until the side is empty\n sides = 2*random.randint(0,3)+1 #Selects a new side\n return(sides) #Returns empty side\n if difficulty < 4: #If difficulty is less than 4\n ran = random.randint(0,8) #Picks random spot on board\n while current_board[ran] != \" \": #Until the spot is empty\n ran = random.randint(0,8) #Picks a new spot\n return(ran) #Returns empty spot", "def inner_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, 
make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves", "def create_pristine_board(size=100):\n board = defaultdict(dict)\n\n for i in xrange(1, size + 1):\n board[i] = {j: (j - i) for j in xrange(min(i + 1, size + 1), min(i + 7, size + 1))}\n\n return board", "def create_chessboard(size=8):\n r1 = (WHITE + BLACK) * int((size / 2)) + \"\\n\"\n r2 = (BLACK + WHITE) * int((size / 2)) + \"\\n\"\n print((r1 + r2) * int((size / 2)))", "def make_deck():\n deck = []\n for i in range(13):\n for j in range(13):\n if j >= i:\n deck.append([i, j])\n else:\n pass\n return deck", "def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux", "def possibilities(board):\n return board[np.where(board == 0)]", "def make_board(row_size: int, column_size: int) -> list:\n board = []\n for r in range(row_size): # Creates a list for each row.\n row = []\n for c in range(column_size): # Populates the list with a pair of coords for each row.\n row.append((c, r))\n board.append(row)\n return board", "def actions(self, state):\n \n #les actions sont définies comme étant les nombres possibles dans \n #la case i,j\n theActions = []\n for i in range(size):\n for j in range(size):\n line = i\n col = j\n if(state[i][j] == 0):\n possibleNumbers = [1,2,3,4,5,6,7,8,9]\n config = state\n for a in range(size):\n x = config[line][a]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n for b in range(size):\n x = config[b][col]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n \n #identifie quelle boite on veut vérifier\n hBox = col - col % 3\n vBox = line - line % 3\n \n for c in range(3):\n for d in range(3):\n x = config[c+vBox][d+hBox]\n if(x in possibleNumbers):\n possibleNumbers.remove(x)\n for k in possibleNumbers:\n theActions.append((i,j,k))\n return theActions", "def drawBoard():\t\n\t#draw 64 Rectangles from (MARGINH,MARGINV) with CASESIZE sizes\n\tfor i in range(BOARDSIZE):\n\t\tfor j in range(BOARDSIZE):\n\t\t\tpygame.draw.rect(DISPLAYSURF, BLACK, [MARGINH + (i)*CASESIZE, MARGINV + (j)*CASESIZE, CASESIZE, CASESIZE], 1)", "def reduce_possibilities_by_box(self):\n x = self.targetCell.x\n y = self.targetCell.y\n if x < 3 and y < 3: #top left\n self.check_box1()\n if x > 2 and x < 6 and y < 3: #middle left\n self.check_box2()\n if x > 5 and y < 3: #bottom left\n self.check_box3()\n if x < 3 and y > 2 and y < 6: #top middle\n self.check_box4()\n if x > 2 and x < 6 and y > 2 and y < 6: #center\n self.check_box5()\n if x > 5 and y > 2 and y < 6: #bottom middle\n self.check_box6()\n if x < 3 and y > 5: #top right\n self.check_box7()\n if x > 2 and x < 6 and y > 5: #middle right\n self.check_box8()\n if x > 5 and y > 5: #bottom right\n self.check_box9()\n self.targetCell.box_neighbour_possibilities = flatten_list(self.targetCell.box_neighbour_possibilities)", "def __init__(self, board_size=19, rules=None):\n self.board_size = board_size\n self.board = board_generate_empty(board_size)\n self.board_history = [deepcopy(self.board)]\n self.rules = rules if rules else {'suicide': False, 'komi': 6.5, 'superko': True, 'editmode': False}\n self.captures = {'w': [], 'b': []}\n self.turn = \"b\"\n self.latest_status = None", "def new_board() -> list:\n board = []\n for _ in range(BOARDHEIGHT):\n board.append([BLANK] * BOARDWIDTH)\n\n return board", "def on_board(pos, size):\n\n row, col = pos\n height, width = size\n\n return 0 <= row <= width and 0 <= col <= height", "def 
action_space(self, state):\n return [create_board(i, state.shape[0])\n for i in _action_space(state, remove_isometries = False,\n remove_losses = False,\n get_probs = False)]", "def checkMoves(self,board):\n possibleMoves = []\n\n for c in xrange(0,8):\n for r in xrange(0,8):\n if board.isValidMove(self.tile,c,r):\n possibleMoves.append(c+r*8)\n\n return possibleMoves", "def __init__(self, board = INITIAL_BOARD, n = 5):\n self.n = n\n self.numPlayer1 = 0\n self.numPlayer2 = 0\n self.board = [values[:] for values in board]\n for i in range(self.n):\n for j in range(self.n):\n if self.board[i][j] == -1:\n self.numPlayer1 += 1\n if self.board[i][j] == 1:\n self.numPlayer2 += 1", "def mazectric(size):\n assert size % 2\n grid = np.random.randint(0, 2, size=(size, size,), dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n \n key = hash(str(grid))\n looped = set()\n yield grid\n \n def alive(i, j):\n n = np.sum(grid[max(0, i-1):i+2, max(0, j-1):j+2]) - grid[i, j]\n return 1 if grid[i, j] and 0 < n < 5 else int(n == 3)\n \n while key not in looped:\n looped.add(key)\n grid = np.array([[alive(i, j) for j in range(size)] \\\n for i in range(size)], dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n key = hash(str(grid))\n yield grid", "def randomCells(w, h):\n A = createBoard(w, h)\n\n for row in range(1, h-1):\n for col in range(1, w-1):\n if random.choice([0, 1]) == 1:\n A[row][col] = 1\n else:\n A[row][col] = 0\n return A", "def gen_grid(grid_width, grid_height):\n\n grid = []\n for x in range(0, grid_width):\n grid.append([])\n for y in range(0, grid_height):\n grid[x].append(False)\n return grid", "def guarded_places(self):\n guarded = []\n for x in range(8):\n for y in range(8):\n if self.squares[x][y].piece and self.squares[x][y].piece.color != self.turn:\n squares = self.squares[x][y].piece.actions(self, (x, y), True)\n if self.squares[x][y].piece.name != 'pawn': # pawns capture in different areas than they move\n guarded.extend(squares[0])\n guarded.extend(squares[1])\n return guarded", "def __init__(self,size, population):\r\n\r\n if size < 2 * population:\r\n raise ValueError(\"Game board not big enough to fit population for both players.\")\r\n\r\n self.board = [[0 for row in range(size)] for column in range(size)]\r\n self.capture_options = []\r\n self.finished = False\r\n \r\n for row in range(size):\r\n # Only populate the given row amount for each player.\r\n if row < population or row > size - population - 1:\r\n for column in range(size):\r\n if (row+column) % 2 == 0:\r\n self.board[row][column] = 1\r\n if row > size - population - 1:\r\n self.board[row][column] = -1", "def _new_board(board_size):\n return tuple(tuple(0 for _ in range(board_size)) for _ in range(board_size))", "def generate_board(self):\n new_board = []\n for i in range(self.size):\n new_board.append([\"0\"] * self.size)\n return new_board", "def init_cells(self):\n state = list()\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if randint(1, 100) >= 100 - CELL_DENSITY:\n # Live cell.\n status = NORMAL\n state.append(1)\n else:\n # Dead cell.\n status = HIDDEN\n state.append(0)\n\n cell = self.canvas.create_rectangle((index % width) * CELL_SIZE, (index / width) * CELL_SIZE,\n ((index % width) + 1) * CELL_SIZE, ((index / width) + 1) * CELL_SIZE,\n fill=\"black\", state=status, outline=\"white\")\n self.cells.append(cell)\n\n return 
state", "def get_all_possible_moves(self):\r\n moves = []\r\n for i in range(8):\r\n for j in range(8):\r\n color = self.board[i][j][0]\r\n if (color == 'b' and not self.turn_white) or (color == 'w' and self.turn_white):\r\n p_type = self.board[i][j][1]\r\n if p_type == 'r':\r\n self.get_rook_moves(i, j, moves)\r\n elif p_type == 'k':\r\n self.get_king_moves(i, j, moves)\r\n elif p_type == 'q':\r\n self.get_queen_moves(i, j, moves)\r\n elif p_type == 'p':\r\n self.get_pawn_moves(i, j, moves)\r\n elif p_type == 'b':\r\n self.get_bishop_moves(i, j, moves)\r\n elif p_type == 'n':\r\n self.get_knight_moves(i, j, moves)\r\n return moves", "def innerCells(w, h):\n A = createBoard(w, h)\n\n for row in range(1, h-1):\n for col in range(1, w-1):\n if row == h-1:\n A[row][col] = 0\n elif col == w-1:\n A[row][col] = 0\n else:\n A[row][col] = 1\n return A", "def blank_board(self):\n return [[False for x in range(self._dim)] for y in range(self._dim)]", "def Get_empty_cells(difficulty, size):\n if(difficulty == 'beginner'):\n return size**2 - 50\n elif (difficulty == 'easy'):\n return size**2 - 40\n elif (difficulty == 'medium'):\n return size**2 - 33\n elif (difficulty == 'hard'):\n return size**2 - 26\n elif (difficulty == 'hell'):\n return size**2 - 17", "def make_game_grid(self):\n return numpy.array([[random.choice(string.ascii_uppercase) for breath in range(self.grid_size)] for depth in\n range(self.grid_size)])", "def create_population(board_size, population_size):\n return [Nonogram(board_size) for x in range(0, population_size)]", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def actions(board):\n actions_set = []\n\n for i in range(len(board)):\n for j in range(len(board)):\n if board[i][j] == EMPTY:\n actions_set.append((i, j))\n\n return actions_set", "def _initiate_board(self):\n grid = []\n for i in range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def __init__(self, board_size=MAX_BOARD_SIZE, cell_size=MAX_CELL_SIZE, dead_color=DEAD, alive_color=ALIVE):\n self._board_size = board_size\n self._cell_size = cell_size\n self.dead_color = dead_color\n self.alive_color = alive_color\n\n self.board = []\n self.mode = 0", "def __init__(self):\n self.empty_seats = [row * 8 + col for row in self.rows for col in self.cols]", "def __init__(self, size):\n self.size = size\n self.num_queens_placed = 0\n self.board = self.generate_board()", "def gameOfLife(self, board: List[List[int]]) -> None:\n def neighbour(i, j):\n total = 0\n for x,y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1), (i-1, j-1), (i-1, j+1), (i+1, j+1), (i+1, j-1)):\n if x >= 0 and y >= 0 and x <= len(board) -1 and y <= len(board[0]) -1 and board[x][y] & 1:\n total += 1\n return total\n 
\n def rule(value,i, j):\n if value == 1:\n if neighbour(i, j) == 2 or neighbour(i, j) == 3:\n value |= 2\n elif value == 0:\n if neighbour(i, j) == 3:\n value |= 2\n return value\n \n if not len(board):\n return []\n m = len(board)\n n = len(board[0])\n \n for i in range(m):\n for j in range(n): \n board[i][j] = rule(board[i][j], i, j) \n \n for i in range(m):\n for j in range(n): \n board[i][j] = board[i][j] >> 1 \n \n \n \n return board", "def get_legal_moves(self, color):\n moves = [] # stores the legal moves.\n # Get all the squares with pieces of the given color.\n for x in range(self.n):\n for y in range(self.n):\n if self[x][y]==0:\n moves.append((x,y))\n return moves", "def board_generate_empty(size: 'board size'):\n empty_board = [[None] * size for _ in range(size)]\n return empty_board", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def create_chessboard(size=8):\r\n\r\n def _create_even_line(size):\r\n return ''.join([WHITE if i % 2 else BLACK for i in range(size)])\r\n \r\n even_line = _create_even_line(size)\r\n odd_line = even_line[::-1]\r\n \r\n print('\\n'.join(even_line if line % 2 else odd_line for line in range(size)))", "def actions(self,state):\n sick = []\n health = []\n num_s = 0\n num_h = 0\n for i in range(self.row):\n for j in range(self.col):\n if state[i][j][0] == 'S':\n sick.append((\"quarantine\", (i, j)))\n num_s += 1\n elif state[i][j][0] == 'H':\n health.append((\"vaccinate\", (i, j)))\n num_h += 1\n\n res = []\n if num_h < self.medics:\n health_pow = list(chain.from_iterable(combinations(health, r) for r in range(num_h, num_h + 1)))[:]\n else:\n health_pow = list(chain.from_iterable(combinations(health, r) for r in range(self.medics, self.medics + 1)))[:]\n if num_s < self.police:\n sick_pow = list(chain.from_iterable(combinations(sick, r) for r in range(num_s, num_s + 1)))[:]\n else:\n sick_pow = list(chain.from_iterable(combinations(sick, r) for r in range(self.police, self.police + 1)))[:]\n if len(health_pow) == 0:\n sick_pow.append(())\n return tuple(sick_pow)\n if len(sick_pow) == 0:\n health_pow.append(())\n return tuple(health_pow)\n for i in range(len(health_pow)):\n for j in range(len(sick_pow)):\n res.append(health_pow[i] + sick_pow[j])\n return tuple(res)", "def compute_territory(self):\n # Initialize the square occupancy vector of the board.\n vector = self.board.new_vector()\n\n # Mark current position as reachable.\n vector[self.index] = True\n\n # List all places reacheable by the piece from its current position.\n for x_shift, y_shift in self.movements:\n # Mark side positions as reachable if in the limit of the board.\n try:\n reachable_index = self.board.coordinates_to_index(\n self.x, self.y, x_shift, y_shift)\n except ForbiddenCoordinates:\n continue\n vector[reachable_index] = True\n\n return vector", "def make_board(N):\n assert N >= 1, \"Invalid board dimension\";\n assert type(N) == int, \"N must be an integer\";\n return [[\"*\" for x in range(N)] for x in range(N)];", "def create_board(rows, columns):\n res = [[0 for i in range(columns)] for j in range(rows)]\n return res", "def get_all_possible_moves():\r\n \"\"\"\r\n Creates the labels for the 
universal chess interface into an array and returns them\r\n \"\"\"\r\n labels_array = []\r\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\r\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\r\n promoted_to = ['q', 'r', 'b', 'n']\r\n\r\n for l1 in range(8):\r\n for n1 in range(8):\r\n destinations = [(t, n1) for t in range(8)] + \\\r\n [(l1, t) for t in range(8)] + \\\r\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\r\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\r\n [(l1 + a, n1 + b) for (a, b) in\r\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\r\n for (l2, n2) in destinations:\r\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\r\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\r\n labels_array.append(move)\r\n for l1 in range(8):\r\n l = letters[l1]\r\n for p in promoted_to:\r\n labels_array.append(l + '2' + l + '1' + p)\r\n labels_array.append(l + '7' + l + '8' + p)\r\n if l1 > 0:\r\n l_l = letters[l1 - 1]\r\n labels_array.append(l + '2' + l_l + '1' + p)\r\n labels_array.append(l + '7' + l_l + '8' + p)\r\n if l1 < 7:\r\n l_r = letters[l1 + 1]\r\n labels_array.append(l + '2' + l_r + '1' + p)\r\n labels_array.append(l + '7' + l_r + '8' + p)\r\n return labels_array", "def legalMoves(self):\n return [c for c in range(self.getWidth()) if len([r for r in range(self.getHeight()) if self.cell[c][r]==EMPTY])>0 ]", "def _available_actions(self, state, colour):\n available_actions = []\n if colour == \"white\":\n stacks = +state.state\n else:\n stacks = -state.state\n for square in stacks.keys():\n available_actions.append((\"BOOM\", square))\n for square, n in stacks.items():\n for d in range(1, n + 1):\n for next_square in self._NEXT_SQUARES(square, d):\n if next_square in stacks or state.state[next_square] == 0:\n for m in range(1, n + 1):\n move_action = (\"MOVE\", m, square, next_square)\n available_actions.append(move_action)\n return available_actions", "def generate_list(self):\n\n array = [False] * 25\n bits = self.generate_bits()\n\n for column in range(2, -1, -1):\n for row in range(0, 5):\n bit = next(bits)\n\n array[column + (row * 5)] = bit\n array[(4 - column) + (row * 5)] = bit\n\n return array", "def get_winning_discs(self):\r\n for i in range(6):\r\n for j in range(7):\r\n if self.board[i][j] == 'O':\r\n continue\r\n if self.is_horizontal_four(i, j):\r\n return [(i, x) for x in range(j, j + 4)]\r\n if self.is_vertical_four(i, j):\r\n return [(x, j) for x in range(i, i - 4, -1)]\r\n if self.is_neg_diagonal_four(i, j):\r\n return [(x, y) for x, y in zip(\r\n range(i, i + 4), range(j, j - 4, -1))]\r\n if self.is_pos_diagonal_four(i, j):\r\n return [(x, y)\r\n for x, y in zip(range(i, i + 4), range(j, j + 4))]", "def pawnThreat(self, color, fromRow, fromCol):\n\n if color == \"black\":\n direc = 1\n boardThreat = self.blackThreat\n else:\n direc = -1\n boardThreat = self.whiteThreat\n\n # Pawn Promotion is automatically changed to a Queen\n if fromRow == 0 or fromRow == 7:\n self.queenThreat(color, fromRow, fromCol)\n return\n\n # All spots that are threatened by pieces\n threatList = []\n # The list of all possible moves\n moveList = []\n\n # If it's the left column, it can only go diagonal right\n if fromCol == 0:\n toCol = 1\n toRow = fromRow + direc\n threatList.append((toRow, toCol))\n if self.pieces.piece_color(self.board.squares[toRow][toCol]) != color \\\n and self.pieces.piece_color(self.board.squares[toRow][toCol]) != None:\n moveList.append((toRow, toCol))\n\n # If it's in the right column, it can only 
go diagonal left\n elif fromCol == 7:\n toCol = 6\n toRow = fromRow + direc\n threatList.append((toRow, toCol))\n if self.pieces.piece_color(self.board.squares[toRow][toCol]) != color \\\n and self.pieces.piece_color(self.board.squares[toRow][toCol]) != None:\n moveList.append((toRow, toCol))\n\n # Otherwise the pawn can threaten diagonally both directions\n else:\n toCol1 = fromCol - 1\n toCol2 = fromCol + 1\n toRow = fromRow + direc\n threatList.append((toRow, toCol1))\n threatList.append((toRow, toCol2))\n if self.pieces.piece_color(self.board.squares[toRow][toCol1]) != color \\\n and self.pieces.piece_color(self.board.squares[toRow][toCol1]) != None:\n moveList.append((toRow, toCol1))\n if self.pieces.piece_color(self.board.squares[toRow][toCol2]) != color \\\n and self.pieces.piece_color(self.board.squares[toRow][toCol2]) != None:\n moveList.append((toRow, toCol2))\n\n # For pawn's first move, adds both spaces in front to the move list\n # Not added to threat because pawns can't kill in front\n if (fromRow == 1 and color == \"black\") or (fromRow == 6 and color == \"white\"):\n if self.board.squares[fromRow + direc][fromCol] == None:\n moveList.append((fromRow + direc, fromCol))\n if self.board.squares[fromRow + (direc * 2)][fromCol] == None:\n moveList.append((fromRow + (direc * 2), fromCol))\n else: # Otherwise just the one space in front\n if self.board.squares[fromRow + direc][fromCol] == None:\n moveList.append((fromRow + direc, fromCol))\n\n # Amends the boardThreat dictionary count of threatened spaces or adds it to the dictionary\n for move in threatList:\n if (move[0], move[1]) not in boardThreat.keys():\n boardThreat[(move[0], move[1])] = 1\n else:\n boardThreat[(move[0], move[1])] += 1\n\n # Adds possible move to move list if the pawn is not blocked without adding to threat\n for move in moveList:\n if self.pieces.piece_color(self.board.squares[move[0]][move[1]]) == color:\n continue\n if color == \"black\":\n self.blackMoves.append(((fromRow, fromCol), (move[0], move[1])))\n else:\n self.whiteMoves.append(((fromRow, fromCol), (move[0], move[1])))", "def make_board(side_len):\n assert side_len > 0, 'Board size should be > 0.'\n return [[random.choice(string.ascii_lowercase) for _ in range(side_len)] for _ in range(side_len)]", "def available_combinations(self):\n result = []\n\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 0:\n result.append((i, j))\n\n return result", "def get_empty_board_indecies(self):\n empty_indecies = []\n for row_num in range(len(self.board)):\n for col_num in range(len(self.board)):\n if self.board[row_num][col_num] and self.board[row_num][col_num].state == PegState.EMPTY:\n empty_indecies.append((row_num, col_num))\n return empty_indecies", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n n = len(board[0])\n def count(x, y):\n top = y - 1\n down = y + 1\n left = x - 1\n right = x + 1\n if top < 0:\n top = 0\n if down >= m:\n down = m - 1\n if left < 0:\n left = 0\n if right >= n:\n right = n - 1\n _count = 0\n for i in range(top, down + 1):\n for j in range(left, right + 1):\n _count += board[i][j]\n _count -= 
board[y][x]\n return _count\n\n\n result = [[0 for _ in range(n)] for _ in range(m)]\n for i in range(m):\n for j in range(n):\n neighbours = count(j, i)\n if board[i][j] == 0 and neighbours == 3:\n result[i][j] = 1\n if board[i][j] == 1 and (neighbours == 2 or neighbours == 3):\n result[i][j] = 1\n for i in range(m):\n for j in range(n):\n board[i][j] = result[i][j]", "def check_victory(board):\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n player = board[row][col]\n\n # not a player move\n if player == 0 or player == 9:\n continue\n\n # look right\n if col + 3 < WIDTH and player == board[row][col + 1] and player == board[row][col + 2]\\\n and player == board[row][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n if row + 3 < HEIGHT:\n\n # down\n if player == board[row + 1][col] and player == board[row + 2][col] and player == board[row + 3][col]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and right\n if col + 3 < WIDTH and player == board[row + 1][col + 1] and player == board[row + 2][col + 2]\\\n and player == board[row + 3][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and left\n if col - 3 >= 0 and player == board[row + 1][col - 1] and player == board[row + 2][col - 2] \\\n and player == board[row + 3][col - 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n\n # # if no one has won yet\n for row in range(HEIGHT):\n for col in range(WIDTH):\n if board[row][col] == 0 or board[row][col] == 9:\n return None\n\n return 0", "def random_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = random.choice([0, 1])\n else:\n a[row][col] = 0\n \n return a", "def new_game(n):\n matrix = []\n\n for i in range(n):\n matrix.append([0] * n)\n return matrix", "def __init__(self, size: int) -> None:\n if not MIN_BOARD_SIZE <= size or not MAX_BOARD_SIZE >= size or size % 2 != 0:\n raise BoardSizeError(f\"{size} is invalid size!\")\n\n self.size = size\n\n center = size // 2 - 1\n self._grid = [[disc.get_disc()] * size for _ in range(size)]\n self._grid[center][center] = disc.get_disc(Player.WHITE)\n self._grid[center][center + 1] = disc.get_disc(Player.BLACK)\n self._grid[center + 1][center] = disc.get_disc(Player.BLACK)\n self._grid[center + 1][center + 1] = disc.get_disc(Player.WHITE)", "def occupiedElements(self):\n occupiedMatrix = [[0 for x in range(self.gridSize)] for y in range(self.gridSize)]\n\n for i in range(len(self.changeable)):\n for j in range(self.length[i]):\n if self.direction[i] == \"h\":\n occupiedMatrix[self.fixed[i]][self.changeable[i] + j] = 1\n else:\n occupiedMatrix[self.changeable[i] + j][self.fixed[i]] = 1\n\n return occupiedMatrix", "def create_board(N):\n board = [[0 for x in range(N)] for y in range(N)] \n return board", "def draw_by_insufficient(self):\n \n if self.cap_counter > 100:\n self.draw_loop(\"draw due to none in 50\")\n \n if self.board.count(self.empty) == 62:\n self.draw_loop(\"draw due to insufficient\")\n\n \n if self.board.count(self.empty) == 61:\n for i in self.board:\n if i != self.empty:\n if i.graphic == piece_class.PIECEDICT[WHITE][piece_class.Bishop] or i.graphic == piece_class.PIECEDICT[BLACK][piece_class.Bishop]:\n self.draw_loop(\"draw due to insufficient\")\n if i.graphic == piece_class.PIECEDICT[WHITE][piece_class.Knight] or i.graphic == piece_class.PIECEDICT[BLACK][piece_class.Knight]:\n self.draw_loop(\"draw due to insufficient\")", "def create_board(self):\n # # empty 7x7 board\n 
# board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list", "def CheckVictoryCondition(self):\n opponentVictory = True\n for char in self.screen.characters:\n if char.team == 1 and char.leader and not char.dead:\n opponentVictory = False\n if opponentVictory:\n self.screen.refresh()\n self.music.stop()\n sys.exit()\n\n for victory in self.victories:\n playerVictory = True\n nextLevel = victory['next_level']\n if victory['condition'] == 'destroy':\n for char in self.screen.characters:\n if not char.dead and char.team == 2:\n playerVictory = False\n elif victory['condition'] == 'kill leaders':\n for char in self.screen.characters:\n if not char.dead and char.team == 2 and char.leader:\n playerVictory = False\n if playerVictory:\n print('You win')\n if self.music:\n self.music.stop()\n self.screen.objects = []\n self.screen.tileEffects = []\n self = Level(self.screen, nextLevel)", "def new_generation(board):\n # size = board.shape\n new = np.zeros(board.shape)\n for row in range(board.shape[0]):\n for col in range(board.shape[1]):\n if calculate_dead_alive(board, row, col):\n new[row][col] = 1\n else:\n new[row][col] = 0\n return new", "def initSituation(game):\r\n \r\n situation = [ ['' for x in range (3) ] for y in range (3) ]\r\n return situation", "def actions(board):\n available = set()\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n available.add((i, j))\n\n return available", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 
7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def show_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n poss = list(self.possibles[row][col])\n if poss:\n teil = qbwrdd.Tile(poss, self.board.scene)\n teil.cell = \"poss\"\n cell = row * self.board_size + col\n pos_x, pos_y = self.board.cells[cell].x(), self.board.cells[cell].y()\n if col % 3 > 0:\n pos_x += 2\n self.poss_tiles[row][col] = teil\n teil.draw_tile_at(pos_x, pos_y)", "def game_of_life():\n # 3x3 neighbourhood\n offsets = [[(y, x) for y in range(-1, 2)] for x in range(-1, 2)]\n\n # Create mappings\n mappings = {}\n for i in range(2 ** 9):\n\n # Determine the initial state (key)\n key = f\"{bin(i)[2:]:0>9}\" # As binary string\n key = tuple(k == \"1\" for k in key) # As tuple of bools\n key = tuple(key[i * 3:i * 3 + 3] for i in range(3)) # Reshape into 2D grid\n\n # Alive counts\n centre = key[1][1]\n others = sum(sum(row) for row in key) - centre\n\n # Skip if state does not evaluate to True\n if centre:\n if others not in (2, 3):\n continue\n\n else:\n if others != 3:\n continue\n\n mappings[key] = True\n\n return Mapping2DRuleset(mappings, offsets)", "def generate_board(self):\n random.seed(self.seed)\n for row in self.grid:\n for column in row:\n probability = random.random()\n if self.live_probability > probability:\n column.set_alive()", "def new_board(n: int) -> Board:\n\n return [[0 for _ in range(n)] for _ in range(n)]", "def get_conditions(info):\n conditions = info.columns\n # This condition's unique value should be less than 5\n new_conditions = list()\n for c in conditions:\n try:\n n_cond = len(pd.unique(info[c]))\n if 1 < n_cond < 5:\n new_conditions.append(c)\n except TypeError:\n pass\n \n return new_conditions", "def get_possible_moves(self) -> list:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n if p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2:\n return []\n moves = []\n for letter in self.current_board:\n if letter.isalpha():\n moves.append(letter)\n return moves", "def gen_moves_list(self,color='',dontCallIsAttacked=False):\n \n if(color==''):\n color=self.side2move\n mList=[]\n \n # For each 'piece' on the board (pos1 = 0 to 63)\n for pos1,piece in enumerate(self.cases):\n \n # Piece (or empty square) color is not the wanted ? 
pass\n if piece.couleur!=color:\n continue\n \n if(piece.nom=='ROI'): # KING\n mList+=piece.pos2_roi(pos1,self.oppColor(color),self,dontCallIsAttacked)\n continue\n \n elif(piece.nom=='DAME'): # QUEEN = ROOK + BISHOP moves !\n mList+=piece.pos2_tour(pos1,self.oppColor(color),self)\n mList+=piece.pos2_fou(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='TOUR'): # ROOK\n mList+=piece.pos2_tour(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='CAVALIER'): # KNIGHT\n mList+=piece.pos2_cavalier(pos1,self.oppColor(color),self)\n continue\n \n elif(piece.nom=='FOU'): # BISHOP\n mList+=piece.pos2_fou(pos1,self.oppColor(color),self)\n continue\n \n if(piece.nom=='PION'): # PAWN\n mList+=piece.pos2_pion(pos1,piece.couleur,self)\n continue\n \n return mList", "def reduce_board(size: int, rows: [[int]], cols: [[int]]):\n clauses = []\n\n base = size * size\n for i, row in enumerate(rows):\n uvars = [j+(i*size)+1 for j in range(size)]\n reduction = reduce_set(size, row, uvars, base)\n clauses += reduction.clauses\n base += len(reduction.auxvars)\n\n for i, col in enumerate(cols):\n uvars = [i+(j*size)+1 for j in range(size)]\n reduction = reduce_set(size, col, uvars, base)\n clauses += reduction.clauses\n base += len(reduction.auxvars)\n\n cnf = CNF()\n for clause in clauses:\n cnf.append(clause)\n\n return cnf" ]
[ "0.66391665", "0.62301445", "0.6140845", "0.60436356", "0.6038024", "0.5961014", "0.5920955", "0.58922297", "0.5841574", "0.581635", "0.57947874", "0.5757776", "0.57379335", "0.57237613", "0.570596", "0.56783783", "0.5668432", "0.56344336", "0.5629953", "0.56038624", "0.5601722", "0.5581705", "0.5578274", "0.556819", "0.5564179", "0.5550873", "0.55455333", "0.5540024", "0.5540009", "0.55337083", "0.5533662", "0.5516096", "0.55089325", "0.5507076", "0.54970855", "0.5490851", "0.54900646", "0.54830074", "0.5478393", "0.5478291", "0.547709", "0.5476522", "0.54760236", "0.5472086", "0.5467071", "0.5451843", "0.5444031", "0.5442943", "0.54403013", "0.54353696", "0.543244", "0.5416281", "0.5416019", "0.54029673", "0.5393951", "0.5390517", "0.5387643", "0.5377858", "0.5375567", "0.5371803", "0.5370776", "0.5366835", "0.5365917", "0.5361488", "0.5355183", "0.5354314", "0.5349021", "0.5346213", "0.53450775", "0.5339402", "0.5336056", "0.53220594", "0.531878", "0.53167063", "0.5313205", "0.5312626", "0.5311682", "0.5309736", "0.53048253", "0.5300901", "0.52925223", "0.52909917", "0.5290404", "0.5289929", "0.52881104", "0.5266763", "0.5266602", "0.5264942", "0.5254013", "0.5250367", "0.52503026", "0.52495414", "0.5247267", "0.52430916", "0.52380747", "0.5237673", "0.5231427", "0.5231081", "0.52303636", "0.52278286" ]
0.8714315
0
Plays Tic Tac Toe
def TicTacToe(): #Written by Cody West
    current_board = [" "," "," "," "," "," "," "," "," "] #Empty board
    players = 0 #Number of players
    human_turn = 0 #Indicates whether the human goes first or second (is 0 for two player games)
    turn = 1 #Turn number
    while players != 1 and players != 2: #While a valid number of players has not been chosen
        players = int(raw_input("How many players are there?")) #Asks how many players there are
        if players < 1 or players > 2: #If the choice is not valid
            print("Please pick 1 or 2 players") #Prints error message
    if players == 1: #If 1 player
        difficulty = 0 #Difficulty variable
        while difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #While a valid difficulty has not been chosen
            difficulty = int(raw_input("Pick a difficulty. 1 is easiest, 4 is hardest")) #Ask for a difficulty
            if difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #If difficulty choice is not valid
                print("Please pick a difficulty between 1 and 4") #Prints error message
        while human_turn != 1 and human_turn != 2: #While a human turn has not been chosen
            human_turn = int(raw_input("Would you like to go first (1) or second (2)?")) #Ask for human turn
            if human_turn != 1 and human_turn != 2: #If a valid turn is not chosen
                print("Please pick turn 1 or 2") #Print error message
    if human_turn == 1: #If human goes first
        player1 = "human" #Player 1 is human
        player2 = "AI" #Player 2 is AI
    elif human_turn == 2: #If human goes second
        player1 = "AI" #Player 1 is AI
        player2 = "human" #Player 2 is human
    else: #If neither
        player1 = "human" #Player 1 is human
        player2 = "human" #Player 2 is human
    while turn < 10: #While the number of turns in Tic Tac Toe has not been exceeded
        if turn < 3: #For the first three turns
            draw_example_board() #Draw a board showing the slot numbers
        draw_board(current_board) #Draw current board
        ## You could write this logic much more compactly -- try to avoid having so many
        ## lines of code that look identical. You have four different update_board calls
        ## here where you could have just one.
        if turn%2 == 1: #If it's an odd numbered turn
            if player1 == "human":
                print("human")
                update_board(current_board, get_input(current_board, turn), "X") #Update board with player 1's selection and X
            else:
                print("AI")
                update_board(current_board, AI(current_board,"X","O", difficulty), "X") #Update board with AI selection
        else:
            if player2 == "human":
                print("human")
                update_board(current_board, get_input(current_board, turn), "O") #Update board with player 2's selection and O
            else:
                print("AI")
                update_board(current_board, AI(current_board,"O","X", difficulty), "O") #Update board with AI selection
        if check_victory(current_board) == "done":
            return "whatever" #Check victory
        turn = turn + 1 #Increase turn number
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tic_tac_toe(board, player_1, player_2):\n # do an initial clear\n os.system('clear')\n winner = False\n cur_player = player_1\n player_num = 1\n # do Tic-Tac-Toe until we have found a winner\n while not winner:\n print_board(board)\n move = raw_input('\\n%s, where would you like to go? ' % (cur_player))\n row, col = parse_move(move)\n # if we couldn't parse the move then try again\n if row == None or col == None:\n os.system('clear')\n print \"I didn't recognize your move!\"\n print \"Make sure your move is a row and column with no spaces (A4)\\n\"\n continue\n # if they moved somewhere there is already a mark then try again\n if not valid_move(board, row, col):\n os.system('clear')\n print \"You can't move there! Try again.\\n\"\n continue\n # mark the move on the board\n make_move(board, player_num, row, col)\n # see if there is a winner\n winner = get_winner(board)\n # switch turns\n cur_player = player_2 if cur_player == player_1 else player_1\n player_num = 2 if player_num == 1 else 1\n os.system('clear')\n # the winner will either be 1 or 2. If 1 then outcome is True (for player 1)\n outcome = True if winner == 1 else False\n return outcome", "def PlayTicTacToe(numPlayers):\n\tteams = {} # maps the teams onto players or computer\n\tif numPlayers == 0:\n\t\tteams['X'] = 'C'\n\t\tteams['O'] = 'C'\n\telif numPlayers == 1:\n\t\tteams['X'] = 'H'\n\t\tteams['O'] = 'C'\n\telse:\n\t\tteams['X'] = 'H'\n\t\tteams['O'] = 'H'\n\n\tnumberBoard = (\n\t\t\t('0', '1', '2'),\n\t\t\t('3', '4', '5'),\n\t\t\t('6', '7', '8')\n\t\t)\n\tprint('Thank you. The board is numbered like this:')\n\tprint(StringFromBoard(numberBoard))\n\tturn = 'X'\n\tboard = [\n\t\t\t[' ', ' ', ' '],\n\t\t\t[' ', ' ', ' '],\n\t\t\t[' ', ' ', ' ']\n\t\t]\n\tnextMover = 'X'\n\tgame = []\n\twhile True:\n\t\tindex = IndexBoard(board)\n\t\tgame.append('I {}'.format(index))\n\t\tnextPlayer = teams[nextMover]\n\t\tif nextPlayer == 'H':\n\t\t\tmove = GetNextMove(board, index, teams, nextMover)\n\t\telse:\n\t\t\tmove = GetComputerMove(board, index, nextMover)\n\t\t\tprint('The Computer has chosen {}.'.format(move))\n\t\tMove(board, nextMover, move)\n\t\tgame.append('M {} {}'.format(nextMover, move))\n\t\tprint(StringFromBoard(board))\n\n\t\tcanonicalBoard, index, rotations, flips = CanonicalizeBoard(board)\n\t\tif rotations > 0:\n\t\t\tprint('Rotate {} times'.format(rotations))\n\t\t\tgame.append('R {}'.format(rotations))\n\t\tif flips > 0:\n\t\t\tprint ('Flip Horizontally')\n\t\t\tgame.append('F {}'.format(flips))\n\t\tif rotations > 0 or flips > 0:\n\t\t\tboard = canonicalBoard\n\t\t\tprint(StringFromBoard(board))\n\t\t\n\t\tif IsWinner(board, nextMover):\n\t\t\tprint ('{} is the Winner!'.format(nextMover))\n\t\t\tgame.append('W {}'.format(nextMover))\n\t\t\tbreak\n\t\t\n\t\tif IsCatsGame(board):\n\t\t\tprint(\"No winner! 
Cat's game.\")\n\t\t\tgame.append('C')\n\t\t\tbreak\n\n\t\tif nextMover == 'X':\n\t\t\tnextMover = 'O'\n\t\telse:\n\t\t\tnextMover = 'X'\n\tLearnFromGames(game)\n\treturn game", "def test_win(self):\n self.T.board[0] = ['x']*3\n assert self.T.tic_tac_toe(self.T.board)", "def play(self):\n board = Board()\n print(\"Let's play tic-tac-toe against computer!\")\n print(\"Here is your board!\")\n count = 1\n print(board)\n while True:\n board.person_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n board.make_computer_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n print(f\"Board after {count} action.\")\n count += 1\n print(board)", "def play_turn(self, cur_board):\n pass", "def play(self):\n\n value = 0 #player dictionary key\n player = {0: 'O', 1: 'X'}\n\n moveCount = 0 #how many moves have occurred. also doubles as the self.order index.\n turn = \"\"\n while moveCount < self.n**2 and self.go == \"Tie\":\n value = not value\n turn = player[value] #X starts\n key = self.order[moveCount]\n i = key[0]\n j = key[1]\n\n\n# self.rows[i][0] == homogenous?\n# self.rows[i][1] == X/O?\n# self.rows[i][2] == count of X's/O's?\n\n# Check to see if row i is 'homogenous' (contains only X's or O's):\n if self.rows[i][0]:\n\n# Check to see if any square in row i has been played. If it has been played,\n# check to see if it was the same person who's current turn it is.\n if self.rows[i][1] == \"\" or player[value] == self.rows[i][1]:\n\n# Mark the column with the current person's token (X or O).\n# Admittedly, this could be improved to not update every time.\n self.rows[i][1] = turn\n\n# Update the count by one.\n self.rows[i][2] += 1\n\n# If the count is equal to the board size, end the game and return who won and how.\n if self.rows[i][2] == self.n:\n self.go = (turn, 'row ' + str(i))\n\n# If the current person who's turn it is,\n# is not the same as the previous player who played this row,\n# set this row's 'homogenous' attribute to false.\n else:\n self.rows[i][0] = False\n\n if self.cols[j][0]:\n if self.cols[j][1] == \"\" or player[value] == self.cols[j][1]:\n self.cols[j][1] = turn\n self.cols[j][2] += 1\n if self.cols[j][2] == self.n:\n self.go = (turn, 'column ' + str(j))\n else:\n self.cols[j][0] = False\n\n# On boards of odd-sized 'n' (n = 3,5,7,etc...)\n# the middle square is part of both diagonals: 'step' and 'same':\n if i == j:\n if self.diags['same'][0]:\n if self.diags['same'][1] == \"\" or player[value] == self.diags['same'][1]:\n self.diags['same'][1] = turn\n self.diags['same'][2] += 1\n if self.diags['same'][2] == self.n:\n self.go = (turn, 'diagonal from 0,0 to n-1,n-1')\n else:\n self.diags['same'][0] = False\n\n if i + j + 1 == self.n:\n if self.diags['step'][0]:\n if self.diags['step'][1] == \"\" or player[value] == self.diags['step'][1]:\n self.diags['step'][1] = turn\n self.diags['step'][2] += 1\n if self.diags['step'][2] == self.n:\n self.go = (turn, 'diagonal from n-1,0 to 0,n-1')\n else:\n self.diags['step'][0] = False\n\n moveCount += 1\n print(turn, key)\n else:\n return self.go", "def computer_play( game ):\n\n grid = game.get_grid()\n\n diag = game.checkDiagonals()\n row = game.checkRows()\n column = game.checkColumns()\n\n if isinstance(diag, tuple):\n \n for x in diag[1]:\n try:\n x = int(x)\n print(x)\n if isinstance(x, int):\n if 
game.set_mark('O', x):\n return\n\n except ValueError:\n continue\n\n elif isinstance(row, tuple):\n\n for x in row[1]:\n try:\n x = int(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue\n\n elif isinstance(column, tuple):\n\n for x in column[1]:\n try:\n x = int(x)\n if isinstance(x, int):\n if game.set_mark('O', x):\n return\n\n except ValueError:\n continue \n\n for x in list(range(1,10)):\n if game.set_mark('O', x):\n return\n else:\n continue", "def play():\n\tprint(\"Welcome to TIC TAC TOE!\")\n\tboard, player_mark, message, turn_counter = initialize_game();\n\twhile player_mark != \"GG\":\n\t\tdisplay_game(board, message)\n\t\trow,col = get_coordinates()\n\t\tboard, player_mark, turn_counter = update_game(board, row, col, player_mark, turn_counter)\n\t\tplayer_mark, message = check_status(board, player_mark, turn_counter)\n\telse:\n\t\tdisplay_game(board, message)", "def player(board):\n plays = 0\n\n # Count non-empty squares\n for i in range(3):\n for j in range(3):\n if board[i][j] != EMPTY:\n plays += 1\n\n # Even number of plays -> X's turn\n if plays % 2 == 0:\n return X\n else:\n return O", "def take_turn(self, row, col):\n if self.game_is_over:\n return\n if row < 0 or row > 2 or col < 0 or col > 2:\n return\n if self.board[row][col] != '.':\n return\n\n if self.turn_counter % 2 == 0:\n self.board[row][col] = 'X'\n pygame.display.set_caption(\"O's Turn\")\n else:\n self.board[row][col] = 'O'\n pygame.display.set_caption(\"X's Turn\")\n\n self.turn_counter = self.turn_counter + 1\n if self.turn_counter >= 9:\n self.game_is_over = True\n pygame.display.set_caption(\"Tie Game\")\n\n self.check_for_game_over()", "def play(self, event):\n\n # locate second column and row when player click on a square\n colrow_tuple = self.board.find_coords_of_selected_sq(event)\n\n # save the col and row as variable\n corner_two_col, corner_two_row = colrow_tuple[0], colrow_tuple[1]\n\n # calculations to get the key to help locate specific square on\n # the unused dictionary of squares left to play\n col_fl, row_fl = self.board.floor_of_row_col(event.x, event.y)\n rowcol_key = self.board.convert_to_key(col_fl, row_fl)\n\n try:\n self.unused_squares_dict[rowcol_key]\n except KeyError:\n return\n\n if self.player1_turn == True:\n self.add_to_player_sq(rowcol_key, self.player1.selected_sq)\n\n # delete from game unused dictionary of set\n self.delete_used_sq(rowcol_key)\n\n self.board.color_selected_sq(event,\n corner_two_col,\n corner_two_row,\n self.player1.color)\n\n # check game for 3 conditions: a tie, player1 win, or player2 win\n self.check_for_winner(self.player1.selected_sq, self.player1.name)\n\n # switch turn\n self.player1_turn = False\n\n else: # player2's turn\n self.board.color_selected_sq(event,\n corner_two_col,\n corner_two_row,\n self.player2.color)\n\n self.add_to_player_sq(rowcol_key, self.player2.selected_sq)\n self.delete_used_sq(rowcol_key)\n self.check_for_winner(self.player2.selected_sq, self.player2.name)\n self.player1_turn = True", "def player(board):\n #X ALWAYS gets first move, alternates with each additional move\n curr_moves = actions(board)\n if (board == initial_state()):\n return X\n if(len(curr_moves) % 2 == 0):\n return O\n else:\n return X", "def player(board):\n turn = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] != EMPTY:\n turn+=1\n if turn % 2 != 0:\n return O\n else:\n return X", "def play_game():\n display_board()\n while ongoing_game:\n handle_turn(current_player)\n check_if_game_over()\n 
swap_player()\n global board\n if winner == \"X\" or winner == \"O\":\n print(\"<-------- Congratulations \" +\n winner + \", you win. -------->\")\n play_again()", "def player(board):\n count = 0\n for row in range(len(board)):\n for col in range(len(board[row])):\n if board[row][col] == X or board[row][col] == O:\n count += 1\n\n if count % 2 == 0:\n return X\n else:\n return O", "def gameTic(self):\n boxId = -1\n\n if self.currentplayer == 1:\n boxId = self.player1.play(self.gameState, self.currentplayer)\n if self.currentplayer == -1:\n boxId = self.player2.play(self.gameState, self.currentplayer)\n\n if self.validmove(boxId):\n self.makemove(boxId)\n self.checkWin()", "def play(self):\n if self.quit:\n return\n game_type= self.game_mode[0]\n difficulty= self.game_mode[1]\n ini = time()\n self.board = [[0]+[Piece(i%2,1) for i in range(self.board_size-2)]+[0]]+[[Piece(i%2,0)]+[0 for i in range(self.board_size-2)]+[Piece(i%2,0)] for i in range(self.board_size-2)]+[[0]+[Piece(i%2,1) for i in range(self.board_size-2)]+[0]]\n self.buttons = [[i for i in range(self.board_size)] for j in range(self.board_size)]\n self.selected = None\n self.active_player = 0\n self.quit = False\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n self.buttons[i][j] = pygame.draw.rect(self.window, ((i+j)%2*255, (i+j)%2*255, (i+j)%2*255), (20+j*100, 20+i*100, 100, 100))\n self.draw()\n if game_type==1:\n self.play_pvp()\n elif game_type==2:\n self.play_pvc(difficulty)\n elif game_type==3:\n difficulty2 = self.game_mode[2]\n self.play_cvc(difficulty, difficulty2)", "def play(self):\n self.populateGrid()\n self.displayGrid()\n while not self.isGameOver():\n x1, y1 = self.getCoordinates(1) #input(\"Enter coordinates of first card \")\n card1 = self.grid[x1 - 1][y1 - 1]\n x2, y2 = self.getCoordinates(2) #input(\"Enter coordinates of second card \")\n card2 = self.grid[x2 - 1][y2 - 1]\n if (x1, y1) == (x2, y2): #coordinates must be different for the two cards\n print(\" ***Coordinates for the two cards must be different, try again***\")\n continue #continue with while loop\n if card1.rank == card2.rank: #an identical pair\n #print('identical pair')\n card1.face = 'up'\n card2.face = 'up'\n else:\n print(\" Not an identical pair. 
Found %d at (%d,%d) and %d at (%d,%d)\" \\\n % (card1.rank, x1, y1, card2.rank, x2, y2))\n self.displayGrid()", "def player(board):\n x_turn = 0\n o_turn = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_turn += 1\n elif board[i][j] == O:\n o_turn += 1\n if x_turn == 0 and o_turn == 0:\n return X\n elif x_turn > o_turn:\n return O\n elif x_turn == o_turn:\n return X\n return X", "def computer_play(self):\r\n # Depending on game flow, helped randomize when smack showed up\r\n # This is more of an Easter Egg than anything.\r\n if (self.tr.disks_on_board != 0 and (self.tr.disks_on_board % 6 == 0 or\r\n self.tr.disks_on_board % 6 == 3) and self.tr.turn_tracker):\r\n self.ai.talk_smack()\r\n # Computer identifies possible moves to analyze\r\n for item in self.tr.computer_moves:\r\n self.ai.coordinate_extractor(item)\r\n # Computer chooses move\r\n choice = self.ai.choose_move()\r\n # Makes play\r\n choice = self.tr.bd.disks[choice[0]][choice[1]]\r\n self.ai.moves_reset()\r\n choice.color, choice.display_on = 1, True\r\n choice.chain()\r\n # Checks for player move, if none, checks for another move\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n # If none, ends game\r\n else:\r\n if not self.tr.game_over:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def self_play_visualisation(board_size=BOARD_SIZE):\n policy_value = SimpleCNN([board_size, board_size, 2])\n history, winner = play_game(policy_value=policy_value)\n print(\"Watching game replay\\nPress Return to advance board\")\n for state, board, hoice in history:\n print(state)\n input(\"\")\n\n if winner == 1:\n print(\"Black won\")\n else:\n print(\"White won\")", "def playing(player, grid):\n\n\t# Determine the current player and define the colors to use to fill the spots of the grid he chose\n\tif player == Data.current_player['Activator']:\n\t\tcase = colors.GREEN + '[A]' + colors.STOP\n\t\tno_case = colors.RED + '[I]' + colors.STOP\n\t\tc_player = colors.GREEN + player + colors.STOP\n\t\tprint('Joueur actuel : ' + colors.GREEN + player + colors.STOP)\n\n\telse:\n\t\tcase = colors.RED + '[I]' + colors.STOP\n\t\tno_case = colors.GREEN + '[A]' + colors.STOP\n\t\tc_player = colors.RED + player + colors.STOP\n\t\tprint('Joueur actuel : ' + colors.RED + player + colors.STOP)\n\n\tend = check_end(case, no_case, grid)\n\n\tif end == False:\n\t\tcoordXY, grid, taken_cases = check_position(c_player, case, no_case, grid)\n\n\t\t# Modifies grid with the informations given by the player\n\t\tgrid[coordXY] = case\n\t\tupdate_grid(grid, taken_cases, case)\n\n\treturn grid, end", "def click(self, event):\n x = self.ptgrid(event.x)\n y = self.ptgrid(event.y)\n \n # x = loc[0]\n # y = loc[1]\n\n # if self.gamestate == self.STATE_TITLE_SCREEN:\n # self.new_board()\n # self.gamestate = FIRST_PLAYER\n\n\n #duplication /!\\\n if (self.board[y][x] == self.EMPTY and self.p2pGame.isReady):\n if(self.p2pGame.playerTurn == 'X' and self.player == 1):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = 
self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--X:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'O'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif(self.p2pGame.playerTurn == 'O' and self.player == 2):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--O:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif self.gamestate == self.STATE_GAME_OVER:\n #reset\n self.new_board()\n self.gamestate = self.FIRST_PLAYER\n self.p2pGame.sendPlayAgain(\"--A\")", "def make_turn(self):\n # if play first, start in the middle\n if np.count_nonzero(self.board) == 0:\n self.place_disc(self.board.shape[1] / 2)\n return 1\n\n\n # win if possible\n for try_column in range(0,self.board.shape[1]):\n if 0 == self.board[0, try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id())\n if dhw.did_he_win(new_board, self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't loose if in danger\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id())\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n self.place_disc(try_column)\n return 1\n\n # don't fall in trap!\n forbidden_columns = []\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, self.id()) # my move\n new_board = self.simulate_place_disc(new_board, try_column, 3 - self.id()) # enemy move\n if dhw.did_he_win(new_board, 3 - self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # don't ruin my trap\n for try_column in range(0, self.board.shape[1]):\n if 0 == self.board[0,try_column]:\n new_board = self.simulate_place_disc(self.board, try_column, 3 - self.id()) # 'my' move\n new_board = self.simulate_place_disc(new_board, try_column, self.id()) # my move\n if dhw.did_he_win(new_board, self.id(), try_column):\n if try_column not in forbidden_columns: forbidden_columns.append(try_column)\n\n # allow forbidden_columns if no other choice\n if np.count_nonzero(self.board[0, :]) == self.board.shape[1] - len(forbidden_columns):\n forbidden_columns = []\n\n # otherwise, play randomly\n rannum = random.randrange(7)\n while 0 != self.board[0, rannum] or rannum in forbidden_columns:\n rannum = random.randrange(7)\n self.place_disc(rannum)\n return 1", "def play(auto_A: bool = False, auto_B: bool = False) -> None:\n board: Board = Board()\n player: Player = Player.A\n move_count: int = 1\n cell_number: int = 0\n print('Welcome to the Tic-Tac-Toe game!')\n while move_count <= 9:\n print(board.display())\n player_name: str = player.display()\n\n if player == Player.A and auto_A or player == 
Player.B and auto_B:\n cell_number = minimax(board, player)\n print(f'[{move_count}] Player {player_name} moved to {cell_number}.')\n else:\n response: str = input(f'[{move_count}] Player {player_name}, enter your move or q to exit: ')\n\n if response == 'q':\n print(\"Game exited.\")\n break\n\n response_error: Optional[str] = validate_response(response)\n if response_error is not None:\n print(f'ERROR! {response_error}')\n continue\n\n cell_number = int(response)\n if not board.is_cell_empty(cell_number):\n print(f'ERROR! Cell number {cell_number} is not empty.')\n continue\n\n board = board.make_move(player, cell_number)\n if board.is_win(player):\n print(board.display())\n print(f'Congratulations, player {player_name}! You won in {move_count} moves.')\n break\n else:\n move_count += 1\n player = player.switch()\n if move_count > 9:\n print(board.display())\n print('This game has ended in a draw.')", "def play_game(self):\n while self.over is False:\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p1.get_move(self.board)\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p2.get_move(self.board)", "def play(self):\n \n while True:\n self.print_board()\n self.display_board()\n winner = self.is_game_won()\n if winner or self.is_filled():\n break\n \n if self.turn == _PLAYER:\n col = self.human_turn()\n else:\n col = self.ai_turn()\n\n row = self.get_row_for_col(col)\n self.board[7 * row + col] = self.turn\n self.last_play_rc = row, col\n\n if self.debug:\n print(\"position scores:\",\n \"player=\", score_position(self.board, _PLAYER),\n \"ai=\", score_position(self.board, _AI))\n \n self.turn = _AI if self.turn == _PLAYER else _PLAYER\n \n if winner == 0:\n msg = \"Tie!\"\n elif winner == 1:\n msg = \"You win!\"\n else:\n msg = \"I win!\"\n \n oled.text(msg, 64, 30)\n oled.show()\n print(\"\\n\" + msg + \"\\n\")\n \n if winner == 0 or winner == 1:\n if self.plies == 3:\n print(\"\"\"\n(Of course, you did set me to easy mode, which I feel compelled to mention.)\n\"\"\")\n print(\"\"\"\n\nThere are some interesting things to learn about ConnectFour:\n\n {url}\n\nTo move ahead:\n\n >>> import sensors\n >>> sensors.start()\n\n\"\"\".format(url=url(\"connectfour\")))\n\n else:\n print(\"\"\"\nWow. You were beat by a $4 computer--using only one of my processors (!!).\nTo get the code to move ahead, you'll need to at least tie me.\n\nTo play again, make a new instance of the ConnectFour class. 
You can choose\ndifferent options than the defaults:\n\n connectfour.ConnectFour(plies, start_player, serial_input, debug)\n - plies [5]: moves to look ahead (3-6, where 3 is easy and 6 is slow and hard\n - start_player [0]: 0 for random, 1 for you, 2 for me\n - serial_input [False]: Enter moves w/keyboard in terminal instead of knob\n - debug [False]: Show information about current AI evaluation scores\n\nFor example:\n\n >>> g = ConnectFour(plies=4, start_player=1)\n >>> g.play()\n\n\"\"\")", "def play_turn(self, player):\n input('Play turn...')\n print(f'{player.name} to play...\\n')\n \n if isinstance(player, ComputerPlayer):\n print('Thinking...')\n time.sleep(1)\n row, col = player.algorithm(self.board)\n self.board.play(row, col, player.token) # algorithms index from (0,0) - so adjust this to (1,1) etc \n else:\n print(self.board)\n while True:\n usr_input = input(f'{player.name}, enter a move: ')\n \n if usr_input.lower() == 'exit':\n print(f'{player.name} exited!')\n self.exit_flag = True\n return\n\n if usr_input.lower() == 'skip':\n print(f'{player.name} has skipped their go!')\n return\n\n row, col = [int(i) for i in usr_input.split(' ')]\n try:\n self.board.play(row - 1, col - 1, player.token) # index top-left corner as (1,1) in player input, vs (0,0) everywhere else\n except IndexError as e:\n print(str(e), 'Play a different position.')\n else:\n break\n print(f'{player.name} played: ({row + 1}, {col + 1})\\n')\n print(self.board)", "def player_play(self, color, x, y):\r\n self.tr.bd.disks[x][y].color,\r\n self.tr.bd.disks[x][y].display_on = color, True\r\n self.tr.bd.disks[x][y].chain()\r\n self.tr.board_scan_reset()\r\n # Checks for computer move, if none, then checks for another move\r\n if self.tr.board_scan():\r\n self.delay = frameCount\r\n return\r\n else:\r\n self.tr.board_scan_reset()\r\n if not self.tr.board_scan():\r\n return\r\n # If none, ends game.\r\n else:\r\n self.tr.board_scan_reset()\r\n self.tr.scanner()\r\n self.tr.game_over = True\r\n self.tr.run_game_is_over = frameCount", "def play(game, pick):\n\n # btn1\n if pick == 1:\n if game[0,0] != 0:\n game[0,0] = 0\n elif game[0,0] != 1:\n game[0,0] = 1\n \n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n\n # btn2\n if pick == 2:\n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n \n if game[0,0] != 0:\n game[0,0] = 0\n elif game[0,0] != 1:\n game[0,0] = 1\n \n if game[0,2] != 0:\n game[0,2] = 0\n elif game[0,2] != 1:\n game[0,2] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n\n # btn3\n if pick == 3:\n if game[0,2] != 0:\n game[0,2] = 0\n elif game[0,2] != 1:\n game[0,2] = 1\n \n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n \n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n\n # btn4\n if pick == 4:\n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[0,0] != 0:\n game[0,0] = 0\n elif game[0,0] != 1:\n game[0,0] = 1\n \n if game[2,0] != 0:\n game[2,0] = 0\n elif game[2,0] != 1:\n game[2,0] = 1\n\n # btn4\n if pick == 5:\n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n \n if game[0,1] != 0:\n game[0,1] = 0\n elif game[0,1] != 1:\n game[0,1] = 1\n \n if game[2,1] != 
0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n\n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if pick == 6:\n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[0,2] != 0:\n game[0,2] = 0\n elif game[0,2] != 1:\n game[0,2] = 1\n \n if game[2,2] != 0:\n game[2,2] = 0\n elif game[2,2] != 1:\n game[2,2] = 1\n \n if pick == 7:\n if game[2,0] != 0:\n game[2,0] = 0\n elif game[2,0] != 1:\n game[2,0] = 1\n \n if game[1,0] != 0:\n game[1,0] = 0\n elif game[1,0] != 1:\n game[1,0] = 1\n \n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n \n if pick == 8:\n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n \n if game[1,1] != 0:\n game[1,1] = 0\n elif game[1,1] != 1:\n game[1,1] = 1\n \n if game[2,0] != 0:\n game[2,0] = 0\n elif game[2,0] != 1:\n game[2,0] = 1\n \n if game[2,2] != 0:\n game[2,2] = 0\n elif game[2,2] != 1:\n game[2,2] = 1\n \n if pick == 9:\n if game[2,2] != 0:\n game[2,2] = 0\n elif game[2,2] != 1:\n game[2,2] = 1\n \n if game[1,2] != 0:\n game[1,2] = 0\n elif game[1,2] != 1:\n game[1,2] = 1\n \n if game[2,1] != 0:\n game[2,1] = 0\n elif game[2,1] != 1:\n game[2,1] = 1\n return(game)", "def mc_trial(board, player):\r\n while(board.check_win() == None):\r\n blankies = board.get_empty_squares()\r\n target = random.choice(blankies)\r\n board.move(target[0],target[1],player)\r\n if player == provided.PLAYERX:\r\n player = provided.PLAYERO\r\n else:\r\n player = provided.PLAYERX", "def _play(self, func):\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n\n while self._board.possible() != []:\n self._board.move_computer()\n print('\\ncomputer movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is computer')\n return\n\n func()\n print('\\nhuman movement:\\n')\n print(self._board)\n if self._board.check_win():\n print('\\nwinner is human')\n return\n print('\\nwinner is friendship :)')", "def play_game():\n pass", "def main():\r\n global win\r\n global boxes\r\n windowsize = 3\r\n squares = 3\r\n boxes = windowsize/ squares\r\n\r\n \r\n for i in range(squares - 1):\r\n xline = Line(Point(0, (windowsize/squares) * (i + 1)), Point(windowsize, (windowsize/squares) * (i + 1)))\r\n xline.draw(win)\r\n yline = Line(Point((windowsize/squares) * (i + 1), 0), Point((windowsize/squares) * (i + 1), windowsize))\r\n yline.draw(win)\r\n\r\n for i in range((squares ** 2) // 2):\r\n printed_text = Text(Point(100,15),\"Player 1: click a square.\").draw(win)\r\n p1mouse = win.getMouse()\r\n p1x = p1mouse.getX()\r\n p1y = p1mouse.getY()\r\n tic_tac_toe_x(win, p1x, p1y)\r\n printed_text.undraw()\r\n\r\n printed_text = Text(Point(100,15),\"Player 2: click a square.\").draw(win)\r\n \r\n p2mouse = win.getMouse()\r\n p2x = p2mouse.getX()\r\n p2y = p2mouse.getY()\r\n tic_tac_toe_o(win, Point(p2x, p2y))\r\n printed_text.undraw()\r\n\r\n if squares % 2 == 1:\r\n printed_text = Text(Point(100,15), \"Player 1: click a square.\").draw(win)\r\n p1mouse = win.getMouse()\r\n p1x = p1mouse.getX()\r\n ply = p1mouse.getY()\r\n tic_tac_toe_x(win, p1x, p1y)\r\n printed_text.undraw()", "def play_game(self):\n while True:\n\n for player in self.players:\n print(self.board)\n print(\"Your turn player {}\".format(player))\n\n self.play_turn_for_player(player)\n\n if self.board.is_draw():\n print(\"Its a draw!\")\n return \"draw\"\n\n elif 
self.board.is_victory(player.icon):\n print(self.board)\n print(\"{} Wins! Congrats!\".format(player.icon))\n return player.name", "def player(board):\n\n # Game is over\n if terminal(board):\n return None\n\n # Count number of occurences of X and O\n x_count = 0\n o_count = 0\n for row in board:\n for box in row:\n if box == X:\n x_count = x_count + 1\n elif box == O:\n o_count = o_count + 1\n # When move count is tied, X is next\n if x_count <= o_count:\n return X\n # When X has moved once more than O, next move is O\n else:\n return O", "def player(board):\n counter = 0\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n counter += 1\n elif board[i][j] == O:\n counter -= 1\n\n if counter == 0:\n return X\n else:\n return O", "def player(board):\r\n if terminal(board):\r\n return \"X\"\r\n \r\n count_X = 0\r\n count_O = 0\r\n for row in board:\r\n for col in row:\r\n if col == \"X\":\r\n count_X += 1\r\n elif col == \"O\":\r\n count_O += 1\r\n \r\n if count_X > count_O:\r\n return \"O\"\r\n else:\r\n return \"X\"\r\n \r\n raise NotImplementedError", "async def tictactoe(self, ctx, opponent: discord.Member):\n\n if opponent == ctx.message.author:\n await ctx.send(\"**You cannot play against yourself!**\")\n return\n\n invitation = lambda d=False: [\n [\n Button(label=\"Decline\", style=ButtonStyle.red, disabled=d),\n Button(label=\"Accept\", style=ButtonStyle.green, disabled=d)\n ]\n ]\n\n msg = await ctx.send(f\"**{opponent.mention}, {ctx.message.author.mention} invited you to a game of TicTacToe!**\", components=invitation())\n\n try:\n\n invite = await bot.wait_for(\"button_click\", check=lambda res: res.user.id == opponent.id and res.message.id == msg.id, timeout=60)\n\n if invite.component.label == \"Decline\":\n await invite.respond(type=InteractionType.UpdateMessage, content=f\"**{opponent.mention} declined the invitation!**\", components=invitation(True))\n return\n \n else:\n await invite.respond(type=InteractionType.UpdateMessage, content=f\"**{opponent.mention} accepted the invitation!**\", components=invitation(True))\n await asyncio.sleep(1)\n pass\n \n except asyncio.TimeoutError:\n await msg.edit(type=InteractionType.UpdateMessage, content=f\"**Timed out!**\", components=invitation(True))\n return\n\n options = [\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]\n ]\n\n O = 1\n X = -1\n\n #the person who has the turn\n turn = random.choice([O, X])\n\n\n#----------------------------------------------------------------------------------------------------------------\n\n\n #updates the board to match the options\n def board(disabled: bool=False):\n\n board = [[0]*3 for i in range(3)]\n\n for i in range(3):\n for j in range(3):\n if options[i][j] == O:\n board[i][j] = Button(style=ButtonStyle.green, label=\"O\", id=f\"{i} {j}\", disabled=True)\n elif options[i][j] == X:\n board[i][j] = Button(style=ButtonStyle.red, label=\"X\", id=f\"{i} {j}\", disabled=True)\n else:\n board[i][j] = Button(style=ButtonStyle.grey, label=\"\\u200b\", id=f\"{i} {j}\", disabled=disabled)\n return board\n\n #check if there is a winner\n def has_won():\n\n #check horizontal\n for x in options:\n if sum(x) == 3 or sum(x) == -3:\n return True\n\n #check vertical\n for y in range(3):\n v = options[0][y] + options[1][y] + options[2][y]\n if v == 3 or v == -3:\n return True\n\n #check diagonals\n d = options[0][2] + options[1][1] + options[2][0]\n if d == 3 or d == -3:\n return True\n\n d = options[0][0] + options[1][1] + options[2][2]\n if d == 3 or d == -3:\n return True\n\n def is_tie():\n\n if 
not (\"0\" in str(options)) and not has_won():\n return True\n\n def get_player(team):\n\n if team == 1:\n return opponent\n else:\n return ctx.message.author\n\n\n#----------------------------------------------------------------------------------------------------------------\n\n\n await msg.edit(f\"**{get_player(turn).mention}({turn}) goes first**\", components=board())\n\n\n while True:\n try:\n\n #wait 60 seconds for the user who has this turn to react\n res = await bot.wait_for(\"button_click\", check=lambda res: res.user.id == get_player(turn).id and res.message.id == msg.id, timeout=60) \n\n #changes the selected option's value depending on who's turn it is\n options[int(res.component.id.split()[0])][int(res.component.id.split()[1])] = turn\n\n #if there is a winner\n if has_won():\n await res.respond(type=InteractionType.UpdateMessage, content=f\"**🎉 {get_player(turn).mention} is the winner! 🎉**\", components=board(True))\n return\n elif is_tie():\n await res.respond(type=InteractionType.UpdateMessage, content=f\"**Draw!**\", components=board(True))\n return\n else:\n turn = -turn\n await res.respond(type=InteractionType.UpdateMessage, content=f\"**{get_player(turn).mention}'s turn**\", components=board())\n pass\n\n #if the player in turn times out\n except asyncio.TimeoutError:\n await msg.edit(f\"**Timed out! 🎉 {get_player(-turn).mention} is the winner! 🎉**\", components=board(True))\n return", "async def tictactoe(self, ctx, playerTwo: discord.Member):\n if ctx.message.channel.id in self.sessions:\n return # no more than one game session per channel\n else:\n self.sessions.append(ctx.message.channel.id)\n\n finished = False\n count = 0\n takenCells = []\n correct = False\n currPlayer = \"X\"\n winner = \"\"\n topRow = []\n midRow = []\n botRow = []\n\n gameBoard = {\n \"7\": \"7️⃣\",\n \"8\": \"8️⃣\",\n \"9\": \"9️⃣\",\n \"4\": \"4️⃣\",\n \"5\": \"5️⃣\",\n \"6\": \"6️⃣\",\n \"1\": \"1️⃣\",\n \"2\": \"2️⃣\",\n \"3\": \"3️⃣\",\n }\n\n initial_board = f\"\"\"\n{gameBoard['7']}{gameBoard['8']}{gameBoard['9']}\n{gameBoard['4']}{gameBoard['5']}{gameBoard['6']}\n{gameBoard['1']}{gameBoard['2']}{gameBoard['3']}\n \"\"\"\n\n message = await ctx.send(initial_board)\n\n def isCorrectPlayer(msg):\n if currPlayer == \"X\":\n if msg.author == ctx.message.author:\n return True\n else:\n if msg.author == playerTwo:\n return True\n return False\n\n while not finished:\n try:\n while not correct:\n move = await self.bot.wait_for(\n \"message\", check=isCorrectPlayer, timeout=30.0\n )\n if move.content.isdigit():\n if int(move.content) in range(1, 10):\n if move.content not in takenCells:\n correct = True\n break\n else:\n await ctx.send(\"That square is occupied\")\n else:\n await ctx.send(\"Please enter a number from 1-9\")\n else:\n await ctx.send(\"You didn't put in a number. 
\")\n correct = False\n if currPlayer == \"X\":\n gameBoard[move.content] = \"❎\"\n else:\n gameBoard[move.content] = \"🅾️\"\n takenCells.append(move.content)\n\n await message.edit(\n content=f\"\"\"\n{gameBoard['7']}{gameBoard['8']}{gameBoard['9']}\n{gameBoard['4']}{gameBoard['5']}{gameBoard['6']}\n{gameBoard['1']}{gameBoard['2']}{gameBoard['3']}\n\"\"\"\n )\n count += 1\n await move.delete()\n topRow = [gameBoard[\"7\"], gameBoard[\"8\"], gameBoard[\"9\"]]\n midRow = [gameBoard[\"4\"], gameBoard[\"5\"], gameBoard[\"6\"]]\n botRow = [gameBoard[\"1\"], gameBoard[\"2\"], gameBoard[\"3\"]]\n for i in range(0, 3):\n if topRow[i] == midRow[i] == botRow[i]:\n winner = currPlayer\n finished = True\n break\n elif topRow.count(topRow[i]) == len(topRow):\n winner = currPlayer\n finished = True\n break\n elif midRow.count(midRow[i]) == len(midRow):\n winner = currPlayer\n finished = True\n break\n elif botRow.count(botRow[i]) == len(botRow):\n winner = currPlayer\n finished = True\n break\n elif topRow[0] == midRow[1] == botRow[2]:\n winner = currPlayer\n finished = True\n break\n elif topRow[2] == midRow[1] == botRow[0]:\n winner = currPlayer\n finished = True\n break\n\n if currPlayer == \"X\":\n currPlayer = \"O\"\n else:\n currPlayer = \"X\"\n\n if count == 9:\n await ctx.send(\"Game's over!\")\n finished = True\n break\n\n except TimeoutError:\n await ctx.send(\"You took too long, the game is over! \")\n finished = True\n self.sessions.remove(ctx.message.channel.id)\n return\n if winner == \"X\":\n await ctx.send(ctx.message.author.display_name + \" has won the game!\")\n elif winner == \"O\":\n await ctx.send(playerTwo.display_name + \" has won the game!\")\n else:\n await ctx.send(\"Nobody won!\")\n\n self.sessions.remove(ctx.message.channel.id)\n return", "def play_game_turn(player, symbol):\n\n row = ask_input(player, \"row\")\n column = ask_input(player, \"column\")\n\n if board.is_empty(row, column):\n board.put_symbol(symbol, row, column)\n board.print_board()\n else:\n print \"That spot has been taken. 
Please try again.\"\n play_game_turn(player, symbol)", "def play(self, tictactoe):\n raise Exception(\"You implement this method to use it.\")", "def play(self,position):\n (x, y) = position\n if self.board[x][y] != 0:\n print('Error, ' + str(x) + ',' + str(y) + ' is not a possible state')\n return\n else:\n self.board[x][y] = self.color\n self.color = self.color * -1\n (terminated, winner) = self.judge_terminal()\n if terminated:\n self.ended = True\n self.winner = winner\n return winner", "def terminal(board):\n\n curr_moves = actions(board)\n #tie\n if(len(curr_moves) == 0 and winner(board) == EMPTY):\n return True\n #winner\n elif(len(curr_moves) != 0 and winner(board) != EMPTY):\n return True\n #game on\n else:\n return False", "def player(board):\n count = 0\n rows = 3\n columns = 3\n for i in range(rows):\n for j in range(columns):\n if board[i][j] != EMPTY:\n count += 1\n if count % 2 == 0:\n player = X\n else:\n player = O\n return player\n\n #raise NotImplementedError", "def play(self):\n for step_i in range(self.max_step):\n player_id = step_i & 1\n player = self.players[player_id]\n action = player.nxt_move()\n if isinstance(player, mcts.MCTSPlayer) and player.value_net.loggable:\n print(f'Player{player_id}: Action: {action}')\n if not self.is_valid_action(action):\n # because now just consider 2 players\n print(f\"Player: {player_id}, Action: {action} Did Not choose a valid action!\")\n self.board[action // self.w][action % self.w] = player_id\n self.winner = 1 - player_id\n else:\n self.board[action // self.w][action % self.w] = player_id\n self.winner = self.k0()\n self.players[1 - player_id].other_nxt_move(action)\n if self.winner != -1:\n break\n print(f'Winner: {self.winner}')\n for player_id in range(len(self.players)):\n self.players[player_id].game_ended()", "def play(self, players, trace=True):\n method = 'play():'\n if self.winner:\n print('game already has a winner:{} use game.reset() to start again'.format(self.winner))\n return\n\n player1 = players[0]\n player2 = players[1]\n if player1.marker == player2.marker:\n LOGGER.error('%sboth players can not have the same marker \"%s\"',\n method, player1.marker)\n return\n\n while self.board.has_open_position():\n if not player1.move(self.board) or \\\n self.board.is_win(player1.marker):\n break\n\n if trace:\n print(self.board)\n\n if not player2.move(self.board) or \\\n self.board.is_win(player2.marker):\n break\n\n if trace:\n print(self.board)\n\n # for player in players:\n # if not player.move(self.board) or \\\n # self.board.is_win(player.marker):\n # break\n\n if trace and self.board.has_open_position():\n print(self.board)\n\n if self.board.is_win(player1.marker):\n self.winner = player1\n elif self.board.is_win(player2.marker):\n self.winner = player2\n else:\n # no open positions, and no winner, it's a draw\n self.winner = None\n\n # self.winner = None\n # for player in players:\n # if self.board.is_win(player.marker)\n # self.winner = player\n # break", "def main():\n # clear the console screen\n os.system('clear')\n\n # get the names of the players\n player_1 = raw_input('What is the name of player 1? ')\n player_2 = raw_input('What is the name of player 2? ')\n\n # ask for the board size\n try:\n board_size = raw_input('How many rows and columns would you like to play with (3)? ')\n if board_size.strip() == '':\n board_size = 3\n else:\n board_size = int(board_size)\n except Exception as e:\n print \"I don't recognize your board size. 
Try again.\"\n sys.exit()\n\n # create the board (initialize with '-' instead of X and 0)\n board = create_board(board_size)\n\n # do tic-tac-toe until a winner is found\n outcome = tic_tac_toe(board, player_1, player_2)\n\n # print the outcome\n os.system('clear')\n print_board(board)\n print \"\\n%s wins!\" % (player_1 if outcome == 1 else player_2)\n\n\n # The code below writes the outcome to a file and then determines each \n # player's record. All you need to do is ensure that outcome is a boolean \n # value with True representing a win for player 1 and ensure that player_1 \n # and player_2 are both set.\n\n\n # the name of our game results file\n results_file = 'game_results.txt'\n\n write_result(results_file, outcome, player_1, player_2)\n\n print_records(results_file, player_1, player_2)\n\n\n # wait for the user to press enter to quit\n raw_input('\\nPress enter to quit...')\n\n # clear the console screen\n os.system('clear')", "def win_game(self):\n\n def horizontal_win():\n \"\"\"Return whether there is horizontal win\"\"\"\n\n for i in range(0, board_size):\n if set(self.board[i]) == set([o_symbol]) or set(self.board[i]) == set([x_symbol]):\n print \"horizontal win\"\n return True\n\n def vertical_win():\n \"\"\"Return whether there is vertical win\"\"\"\n\n vert_set = set()\n for i in range(0, board_size):\n for j in range(0, board_size):\n vert_set.add(self.board[j][i])\n if vert_set == set([o_symbol]) or vert_set == set([x_symbol]):\n print \"vertical win\"\n return True \n vert_set = set()\n\n def diagonal_win():\n \"\"\"Return whether there is diagonal win\"\"\"\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True\n\n if horizontal_win() or vertical_win() or diagonal_win():\n print \"You have won.\"\n return True", "def player(board):\n moves = 0\n\n for row in range(len(board)):\n for column in range(len(board[row])):\n if board[row][column] != EMPTY:\n moves+=1\n\n if moves % 2 == 0:\n return X\n return O\n\n # raise NotImplementedError", "def main():\n\tprint(\"Welcome to TicTacToe\")\n\tboard = Board()\n\twhile (not board.isOver()):\n\t\tprint(\"It is {0}'s turn\".format(board.current) + board.__str__())\n\t\tmove = input('Where would you like to go? 
: ').strip()\n\t\tif (move == 'q'):\n\t\t\tbreak\n\t\telif (board.makeMove(move) == 1):\n\t\t\tboard.switchPlayer()\n\t\telse:\n\t\t\tprint(\"I didn't understand your input, these are the valid inputs:\\nentering 'q' will quit out of the game.\\n\")\n\t\t\tprint(\"entering a number will place the peice in that box, the numbers are as follows:\\n \\n1|2|3\\n-----\\n4|5|6\\n-----\\n7|8|9\\n\")\n\tprint(board.__str__() + \"\\nGame Over\")\n\tif (board.isOver() is Piece.EX or board.isOver() is Piece.OH):\n\t\tprint(\"Player {0} wins!\".format(board.isOver())) \n\telse:\n\t\tprint(\"It was a draw\")", "def play_game(self, playerX, playerO):\r\n\r\n\t\tresult = \"0\"\r\n\r\n\t\twhile(result != (\"X\" or \"O\" or \"Tie\")):\r\n\r\n\t\t\tresult = take_turn(playerX, playerO, result)\r\n\r\n\t\treturn result\r\n\r\n\t\tpass", "def move(self, row, col, player):", "def winner(board):\n # Hard code winning moves\n # row0\n if board[0][0] == board[0][1] == board[0][2] == X:\n return X\n elif board[0][0] == board[0][1] == board[0][2] == O:\n return O\n # row1\n elif board[1][0] == board[1][1] == board[1][2] == X:\n return X\n elif board[1][0] == board[1][1] == board[1][2] == O:\n return O\n # row2\n elif board[2][0] == board[2][1] == board[2][2] == X:\n return X\n elif board[2][0] == board[2][1] == board[2][2] == O:\n return O\n # col0\n elif board[0][0] == board[1][0] == board[2][0] == X:\n return X\n elif board[0][0] == board[1][0] == board[2][0] == O:\n return O\n # col1\n elif board[0][1] == board[1][1] == board[2][1] == X:\n return X\n elif board[0][1] == board[1][1] == board[2][1] == O:\n return O\n # col2\n elif board[0][2] == board[1][2] == board[2][2] == X:\n return X\n elif board[0][2] == board[1][2] == board[2][2] == O:\n return O\n # diagonal\n elif board[0][0] == board[1][1] == board[2][2] == X:\n return X\n elif board[0][0] == board[1][1] == board[2][2] == O:\n return O\n # inverse diagonal\n elif board[0][2] == board[1][1] == board[2][0] == X:\n return X\n elif board[0][2] == board[1][1] == board[2][0] == O:\n return O\n\n return None", "def test_play_game(self):\r\n\r\n \r\n a_players = [RandomPlayer(1), RandomPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n #Game is played to competion\r\n a_game.play_game()\r\n\r\n a_history = a_game.get_history()\r\n\r\n #Go through each move and check to be sure it's valid\r\n for i in range(1,len(a_history)):\r\n #Get copy of the board\r\n prev_board = a_history[i-1]\r\n cur_board = a_history[i]\r\n\r\n #Check if the board chosen is in valid states\r\n self.assertTrue(cur_board in prev_board.get_states(a_players[0].get_id()) or cur_board in prev_board.get_states(a_players[1].get_id()),\\\r\n \"An invalid board state was added to the history\")\r\n\r\n if i == len(a_history) - 1:\r\n self.assertTrue(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())\r\n else: \r\n self.assertFalse(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())", "def mc_trial(board, player):\n winner = board.check_win()\n while winner == None:\n next_move = random.choice(board.get_empty_squares())\n board.move(next_move[0], next_move[1], player)\n winner = board.check_win()\n player = provided.switch_player(player)", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 
64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def advance(self, board):", "def play_game(self, x_player, o_player):\n #from connectfour import Player #either from connect4_player or connectfour\n \n current_side = \"X\"\n players = {\"X\": x_player, \"O\": o_player}\n while ((not self.win_for(\"X\")) and\n (not self.win_for(\"O\")) and\n (not self.is_full())):\n print()\n print(self)\n print()\n move = Board.INVALID_MOVE\n while not self.allows_move(move):\n if players[current_side] == \"human\":\n move = int(input(current_side + \"'s move: \"))\n else:\n move = players[current_side].next_move(self)\n print(\"Computer playing for \" + current_side +\n \" plays at \" + str(move))\n\n self.add_move(move, current_side)\n if current_side == \"X\":\n current_side = \"O\"\n else:\n current_side = \"X\"\n\n if self.win_for(\"X\"):\n print(\"X wins --- congratulations!\\n\")\n elif self.win_for(\"O\"):\n print(\"O wins --- congratulations!\\n\")\n else:\n print(\"Tied game!\\n\")\n\n print()\n print(self)", "def play(self):\n\n while self.board.board[self.board.target_location()[0]]\\\n [self.board.target_location()[1]] == \"E\": # the car didn't\n # arrive the exit\n self.__single_turn()\n print(\"you won!\")", "def player(board):\n x = 0\n o = 0\n for row in board:\n for m in row:\n if m == \"X\":\n x += 1\n if m == \"O\":\n o += 1\n return \"O\" if x > o else \"X\"", "def play(self, row: int, col: int) -> bool:\n if not self.is_in_the_grid(row, col):\n return False\n self.flip_around(row, col)\n return True", "def check_winner(self):\n for row in self.board.values():\n if all([mark == \"x\" for mark in row]):\n return self.player_1\n elif all([mark == \"o\" for mark in row]):\n return self.player_2\n\n # checks every column\n for i in range(3):\n first_row, second_row, third_row = self.board.values()\n if first_row[i] == \"x\" and second_row[i] == \"x\" and third_row[i] == \"x\":\n return self.player_1\n elif first_row[i] == \"o\" and second_row[i] == \"o\" and third_row[i] == \"o\":\n return self.player_2\n\n # checks the diagonals\n if self.board[\"a\"][0] == \"x\" and self.board[\"b\"][1] == \"x\" and self.board[\"c\"][2] == \"x\":\n return self.player_1\n if self.board[\"a\"][2] == \"o\" and self.board[\"b\"][1] == \"o\" and self.board[\"c\"][0] == \"o\":\n return self.player_2\n\n return None", "def play_game(game, 
*players):\n state = game.initial\n while True:\n for player in players:\n print \"now move for player \", player\n move = player(game, state) # update move\n state = game.make_move(move, state) # update game state\n print '---'\n game.display(state) # display board\n print '---'\n \n if move == None or game.terminal_test(state): #check game end\n if game.utility(state,'X')==1:\n print 'X has won!'\n elif game.utility(state,'O')==1:\n print 'O has won!'\n else:\n print 'Its A Draw!'\n return #exit", "def terminal(board):\n if winner(board) == X or winner(board) == O:\n return True\n moves = 0\n for row in board:\n for turn in row:\n if turn == X or turn == O:\n moves += 1\n if moves == 9:\n return True\n return False", "def playOneGame(self, p1, p2, show):\n currentPlayer, otherPlayer = p1, p2\n winner = None\n gameFinished = False\n #\n while not(gameFinished): \n if show:\n self.display() # show the board\n # \n move = currentPlayer.getMove(self.deepCopy())\n if show:\n print currentPlayer.name + ' is playing in column ' , move\n \n if (move == []) or (not move in self.legalMoves()): # for dysfunctional player\n gameFinished = True\n winner = otherPlayer\n else: \n self.makeMove(currentPlayer.colour, move)\n winningColour, gameFinished = self.checkwin()\n if gameFinished:\n winner = currentPlayer\n else:\n currentPlayer, otherPlayer = otherPlayer, currentPlayer\n # if in verbose mode display the outcome of the game\n if show:\n self.display()\n if winner:\n print 'The winner is ', winner.name ,' ' ,\n if winner.colour == WHITE:\n print 'White -'\n else:\n print 'Black +' \n else:\n print 'Game ended in a draw'\n #\n return winner", "def play(self, index):\n if index < 0 or index >= 9:\n raise IndexError(\"Invalid board index\")\n\n if self.board[index] != ' ':\n raise ValueError(\"Square already played\")\n\n # One downside of storing the board state as a string\n # is that you can't mutate it in place.\n board = list(self.board)\n board[index] = self.next_player\n self.board = u''.join(board)", "def playGame(self):\n print(\"\\nPlay Game\")\n if (self.EndGame()):\n print(\"EndGame stt: \", self.EndGame())\n\n print(\"The End\")\n return True\n else:\n # Get pieceList from thong\n input_result = self.inputMove() \n # input move return 2 forms: True and input result\n if input_result is not True:\n return input_result\n else:\n # Export time table to csv\n black_timetable = pd.DataFrame(self.timetable_black, columns=['Iteration', 'Time']).to_csv(\n \"Time_black.csv\", index=False)\n return True", "def play_cpu(self):\n \n # Play button sound\n self.button_sound()\n\n while True:\n\n # If turns is 9, then all the places on the board are filled. Hence Cpu doesn't get a turn. 
\n if self.turn >= 9:\n break\n\n # Choose a random position and if that position on board is empty, then place a 'O' there.\n i = random.randint(0, 8)\n if self.board[i] == 0:\n #root.after(400)\n self.button_list[i].config(image=self.O_img)\n self.board[i] = -1\n self.turn += 1\n\n break", "def player(board):\n\tif board == initial_state():\n\t\treturn X\n\n\tnumX=0\n\tnumO=0\n\n\tfor i in range(len(board)):\n\t\tfor j in range(len(board)):\n\t\t\tif(board[i][j]==X):\n\t\t\t\tnumX+=1\n\t\t\telif(board[i][j]==O):\n\t\t\t\tnumO+=1\n\n\tif numX > numO:\n\t\treturn O\n\telse:\n\t\treturn X", "def player(board):\n x_counter = 0\n o_counter = 0\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_counter += 1\n elif board[i][j] == O:\n o_counter += 1\n \n # print(f\"x: {x_counter}\")\n # print(f\"o: {o_counter}\")\n \n if x_counter > o_counter:\n return O\n else:\n return X", "def check_victory(board):\n\n for row in range(HEIGHT):\n for col in range(WIDTH):\n\n player = board[row][col]\n\n # not a player move\n if player == 0 or player == 9:\n continue\n\n # look right\n if col + 3 < WIDTH and player == board[row][col + 1] and player == board[row][col + 2]\\\n and player == board[row][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n if row + 3 < HEIGHT:\n\n # down\n if player == board[row + 1][col] and player == board[row + 2][col] and player == board[row + 3][col]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and right\n if col + 3 < WIDTH and player == board[row + 1][col + 1] and player == board[row + 2][col + 2]\\\n and player == board[row + 3][col + 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n # down and left\n if col - 3 >= 0 and player == board[row + 1][col - 1] and player == board[row + 2][col - 2] \\\n and player == board[row + 3][col - 3]:\n if player == 1:\n return +1\n else:\n return -1\n\n\n # # if no one has won yet\n for row in range(HEIGHT):\n for col in range(WIDTH):\n if board[row][col] == 0 or board[row][col] == 9:\n return None\n\n return 0", "def main():\n\n print('R-In-A-Row')\n print()\n\n while True:\n if play == 'human vs human':\n human1Tile, human2Tile = enterHuman1Tile()\n\n turn = whoGoesFirst()\n print('The %s player will got first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'human vs computer':\n human1Tile, computer1Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' % (turn))\n mainBoard = getNewBoard()\n elif play == 'computer vs computer':\n computer1Tile, computer2Tile = enterHuman1Tile()\n turn = whoGoesFirst()\n print('The %s player will go first.' 
% (turn))\n\n\n while True:\n if play == 'human vs human':\n if turn == 'human1':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n\n makeMove(mainBoard, human1Tile, move)\n\n if isWinner(mainBoard, human1Tile):\n winner = 'human1'\n\n break\n turn = 'human2'\n if turn == 'human2':\n drawBoard(mainBoard)\n move2 = getHuman2Move(mainBoard)\n makeMove(mainBoard, human2Tile, move2)\n if isWinner(mainBoard, human2Tile):\n winner = 'human2'\n break\n turn = 'human1'\n\n elif play == 'human vs computer' :\n if turn == 'human':\n drawBoard(mainBoard)\n move = getHuman1Move(mainBoard)\n makeMove(mainBoard, human1Tile, move)\n if isWinner(mainBoard, human1Tile):\n winner = 'human'\n\n break\n turn ='computer'\n\n elif turn == 'computer':\n drawBoard(mainBoard)\n print('The computer is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer'\n break\n turn = 'human'\n elif play == 'computer vs computer':\n if turn == 'computer1':\n drawBoard(mainBoard)\n print('computer1 is thinking...')\n move = getComputer1Move(mainBoard, computer1Tile)\n makeMove(mainBoard, computer1Tile, move)\n if isWinner(mainBoard, computer1Tile):\n winner = 'computer1'\n break\n turn = 'computer2'\n elif turn == 'computer2':\n drawBoard(mainBoard)\n print('computer2 is thinking...')\n move = getComputer2Move(mainBoard, computer2Tile)\n makeMove(mainBoard, computer2Tile, move)\n if isWinner(mainBoard, computer2Tile):\n winner = 'computer2'\n break\n turn = 'computer1'\n\n\n if isBoardFull(mainBoard):\n winner = 'tie'\n break\n\n drawBoard(mainBoard)\n print('Winner is: %s' % winner)\n if not playAgain():\n break", "def mc_trial(board, player):\n tmp_player=player\n while board.check_win()==None:\n #print board.check_win()\n empty=board.get_empty_squares()\n #print empty\n square=random.choice(empty)\n #print square\n board.move(square[0],square[1],tmp_player)\n tmp_player=switch_player(tmp_player)\n return", "def game_play(self):", "def win_game(board :list) -> bool:\n if board == win_state:\n return True\n return False", "def __check_winner(self):\n for i in range(0, 3):\n col = self.__get_col(i)\n if col.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if col.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n row = self.__get_row(i)\n if row.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if row.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if diag.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if diag.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n if self.state.count(' ') == 0:\n print('\\nDraw!')\n self.game_ended = True", "def play_game():\n pass", "def winner(board):\r\n\r\n #rows:\r\n if (board[0][0] == board[0][1] == board[0][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[1][0] == board[1][1] == board[1][2]) and (board[1][0] == \"X\" or board[1][0] == \"O\"):\r\n return board[1][0]\r\n if (board[2][0] == board[2][1] == board[2][2]) and (board[2][0] == \"X\" or board[2][0] == \"O\"):\r\n return board[2][0]\r\n\r\n #columns\r\n if (board[0][0] == board[1][0] == board[2][0]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if 
(board[0][1] == board[1][1] == board[2][1]) and (board[0][1] == \"X\" or board[0][1] == \"O\"):\r\n return board[0][1]\r\n if (board[0][2] == board[1][2] == board[2][2]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n\r\n #diagonals\r\n if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] == \"X\" or board[0][0] == \"O\"):\r\n return board[0][0]\r\n if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] == \"X\" or board[0][2] == \"O\"):\r\n return board[0][2]\r\n \r\n return None\r\n\r\n raise NotImplementedError", "def win(s):\r\n\r\n # check across\r\n for i in range(3):\r\n if board[0 + 3 * i] == board[1 + 3 * i] == board[2 + 3 * i] == s:\r\n board[0 + 3 * i] = board[1 + 3 * i] = board[2 + 3 * i] = '#'\r\n return True\r\n\r\n # check down\r\n for i in range(3):\r\n if board[i] == board[i + 3] == board[i + 6] == s:\r\n board[i] = board[i + 3] = board[i + 6] = '#'\r\n return True\r\n\r\n # check diagonal right\r\n if board[0] == board[4] == board[8] == s:\r\n board[0] = board[4] = board[8] = '#'\r\n return True\r\n\r\n # check diagonal left\r\n if board[6] == board[4] == board[2] == s:\r\n board[6] = board[4] = board[2] = '#'\r\n return True\r\n\r\n return False", "def next_player(self):\n # Counter is a useful class that counts objects.\n count = Counter(self.board)\n if count.get('X', 0) > count.get('O', 0):\n return 'O'\n return 'X'", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def play_game():\n # Display board.\n display_board()\n # While game is still going.\n while game_still_going:\n # Handle a single turn of an arbitrary player.\n handle_turn(current_player)\n # Flip to another player.\n flip_player()\n # Check weather game is over or not.\n check_if_game_over()", "def play_round(self):\r\n your_move = self.you.move()\r\n opposite_move = self.opposite.move()\r\n result = Game.what_move(your_move, opposite_move)\r\n\r\n self.you.learn(opposite_move)\r\n self.opposite.learn(your_move)\r\n\r\n print(\"you choose:\" + your_move + \" and the opposite player choose:\" +\r\n opposite_move)\r\n\r\n if result == 1:\r\n self.you.score += 1\r\n print('=> you won this round!\\n')\r\n elif result == 2:\r\n self.opposite.score += 1\r\n print('=> the opposite pleyer won this round!\\n')\r\n elif result == 0:\r\n print('=> it is Draw!\\n')", "def test_play_example(self):\n data = [[0, 1, 0, 2, 0, 1],\n [2, 0, 2, 1, 2, 1],\n [2, 2, 1, 2, 2, 2],\n [2, 1, 0, 2, 0, 1],\n [2, 1, 0, 1, 1, 1],\n [0, 2, 0, 1, 2, 0]]\n board = Board(data)\n board.set_color(2)\n board.set_color(1)\n board.set_color(0)\n board.set_color(2)\n data_end = [[2, 2, 2, 2, 0, 1],\n [2, 2, 2, 1, 2, 1],\n [2, 2, 2, 2, 2, 2],\n [2, 2, 2, 2, 0, 1],\n [2, 2, 2, 1, 1, 1],\n [2, 2, 2, 1, 2, 0]]\n self.assertListEqual(board.data, data_end)", "def printTurn(self,board,tile):\n if tile == board.BLACK:\n print \"\\n\\nBlack turn 'O'\"\n else:\n print \"\\n\\nWhite turn 'X'\"", "def play():\n global done\n done = False\n g = Game()\n turn = random.choice([PLAYER, AI])\n transitions_agent = []\n agent.epsilon = agent.eps_min\n while done == False:\n g.printBoard()\n if turn == PLAYER:\n row = input('{}\\'s turn:'.format('Red'))\n g.insert(int(row), PLAYER_PIECE)\n else:\n observation = []\n for sublist in g.board:\n for i in sublist:\n observation.append(i)\n observation = np.asarray(observation)\n 
action = agent.choose_action(observation)\n if g.check_if_action_valid(action):\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n else:\n while g.check_if_action_valid(action) == False:\n agent.store_transition(observation, action, -100, observation, done)\n action = action = np.random.randint(7)\n print('{}\\'s turn: %d'.format('Yellow') % action)\n g.insert(action, AI_PIECE)\n observation_ = []\n for sublist in g.board:\n for i in sublist:\n observation_.append(i)\n observation_ = np.asarray(observation_)\n transitions_agent += [(observation, action, observation_, done)]\n turn = AI if turn == PLAYER else PLAYER\n winner = AI if turn == PLAYER else PLAYER\n if winner == AI:\n reward = 20\n else:\n reward = -20\n for i in range(len(transitions_agent)):\n agent.store_transition(transitions_agent[i][0], transitions_agent[i][1], reward, transitions_agent[i][2],\n transitions_agent[i][3])\n agent.learn()\n return", "def play_single_turn(self, action=None):\n self.turn_count += 1\n if self.save_images_suffix:\n self.game_board.graphical_output(save=True, display=False,\n image_suffix=f'{self.save_images_suffix}_{self.turn_count}')\n if self.game_type == self.game_types['human']:\n self.game_board.graphical_output()\n\n self.player_1.play_turn(action if action else self.get_color_options())\n self.player_2.play_turn(self.get_color_options())\n\n if self.game_type == self.game_types['vs_ai']:\n self.game_board.graphical_output(save=True, image_suffix=self.turn_count)\n\n if self.game_type != self.game_types['r_l']:\n print(f\"player 1 played {self.player_1.color}: {self.player_1.score}\")\n print(f\"player 2 played {self.player_2.color}: {self.player_2.score}\")\n print()", "def check_win(self):\r\n wins = [self.check_rows(), self.check_cols(), self.check_diag()]\r\n for case, pos in wins:\r\n if case != -1:\r\n print('Game over!')\r\n if self.grid[case][-1] == self.computer:\r\n print('The computer won!')\r\n return (True, pos)\r\n print('The player won!')\r\n return (True, pos)\r\n\r\n return (self.check_draw(), None)", "def player(board):\n X_count = 0\n O_count = 0\n #to determine the turn, I will make a count of the X and O tokens on the board\n for row in board:\n #I create a dictionary with the count on each row\n player_turns = {i: row.count(i) for i in row}\n #I check if I have X and O tokens in the row, if not, create an entry with 0\n if not (player_turns.get(\"X\")):\n player_turns['X'] = 0\n if not player_turns.get(\"O\"):\n player_turns['O'] = 0\n #I add to my counter the total amount of tokens found for each player in this row\n X_count = X_count + int(player_turns['X'])\n O_count = O_count + int(player_turns['O'])\n\n #if X has the same amount of tokens than O, it means it is X's turn\n if(X_count == O_count):\n #It should be X's turn. 
\n return \"X\"\n #Otherwise, it is O's turn.\n elif(X_count>O_count):\n #it is O's turn.\n return \"O\"", "def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return turn\n return None", "def play(self, player, position, whos_play):\n\n # check if the position has already been marked\n if position not in self.played_pos:\n\n # mark the position with players marker\n self.mark_pos(position, TicTacToe.player_marker[player])\n\n # keep the played position in history of played position\n self.played_pos.append(position)\n\n # keep the position played by player\n self.player_played_pos[player].add(position)\n\n # print the current state of the game\n print(self)\n\n # after the move, check if the player won the game. If so exit the program\n if self.check_win(player):\n print(\"{} wins\".format(player))\n exit(0)\n\n # check if all the positions in grid have been played. If so game is a draw.\n # returns True for is_all_moves_over variable.\n if len(self.played_pos) == TicTacToe.grid_size:\n return True, not whos_play\n\n # if the method doesnt return till now, return False for is_all_moves_over\n # and reverses the p1_move variable in calling method to indicate move of the next person.\n return False, not whos_play\n\n else:\n print(\"Position already played.. player {} play again..\".format(player))\n\n # if the play is invalid, indicate that the game is still on, but doesnt invert the p1_move in the\n # calling method which force the same player to play again\n return False, whos_play", "def mc_trial(board, player):\n if len(board.get_empty_squares()) > 0:\n gra_w_toku = True\n else:\n gra_w_toku = False\n while gra_w_toku:\n tupka = random.choice(board.get_empty_squares())\n board.move(tupka[0], tupka[1], player)\n status = board.check_win()\n if status == player or status == provided.DRAW:\n gra_w_toku = not gra_w_toku\n player = provided.switch_player(player)\n return None", "def UCTPlayGame(itermax):\r\n print(\"Welcome to Ultimate Tic-Tac-Toe!\")\r\n player = 2 if input(\"Do you want to go first? 
[Y/N]: \") == \"N\" else 1\r\n\r\n state = GameState()\r\n while state.GetMoves():\r\n currentPlayer = state.NextPlayer()\r\n\r\n print(str(state))\r\n print(\"Moves for player \" + str(currentPlayer) + \": \")\r\n print(np.matrix(state.GetMoves()), \"\\n\")\r\n\r\n if currentPlayer == player:\r\n m = None\r\n while m not in state.GetMoves():\r\n try:\r\n m = int(input(\"Your move: \"))\r\n except ValueError:\r\n continue\r\n # m = random.choice(state.GetMoves())\r\n else:\r\n m = UCT(rootstate=state, itermax=itermax, verbose=False)\r\n print(\"AI played: \" + str(m))\r\n state.DoMove(m)\r\n print(str(state))\r\n\r\n if state.GetResult(state.playerJustMoved) == 1.0:\r\n print(\"Player \" + str(state.playerJustMoved) + \" wins!\")\r\n return state.playerJustMoved\r\n elif state.GetResult(state.playerJustMoved) == 0.0:\r\n print(\"Player \" + str(state.NextPlayer()) + \" wins!\")\r\n return state.NextPlayer()\r\n else:\r\n print(\"Nobody wins!\")\r\n return 0", "def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'", "def game(self):\n counter = 22\n while counter != 0:\n for line in self.f_board:\n print(\"\".join(line))\n i = Inputer().inputer()\n\n if self.board[i[0]][i[1]] == '1':\n print(\"You hit me!\")\n counter -=1\n self.f_board[i[0]][i[1]] = \"X\"\n else:\n print(\"You missed\")\n self.f_board[i[0]][i[1]] = \"-\"\n else:\n print(\"You win!\")", "def bot():\n table = [ \n \"-\", \"-\", \"-\", \n \"-\", \"-\", \"-\", \n \"-\", \"-\", \"-\", \n ]\n choices = choice()\n turn = [0,1,2,3,4,5,6,7,8]\n \n while len(turn) != 0:\n \n # Player1 turn\n move_index, turn = table_check(table, turn) # Check if the index is valid\n table[move_index] = choices[0] # Fill X or O to the table base on the index chosen\n display_board(table) # Display to let them see for 2nd player's turn\n\n # The game cannot be won unless 5 moves has been played, so when turn has been reduced to 4 moves or less, check win\n # Check win before tie since last move might make it a win\n if len(turn) <= 4:\n win_condition, win = win_check(table)\n if win_condition == True:\n print(f\"\\nYou won!!!\\nThanks for playing!\")\n retry()\n\n # \"X\" will be the one who finish the game, so after filling the X into the table\n # We need to check if it's the last turn, if yes than break\n if len(turn) == 0:\n break\n \n # Bot's turn\n move_index = random.choice(turn) # Bot moves can just be chosen randomly from the\n turn.remove(move_index) # available moves from turn, so doesnt need to table_check()\n table[move_index] = choices[1] # Fill X or O to the table base on the index chosen\n print(\"Bot is thinking....\")\n time.sleep(random.randint(1,2)) # Make it realistic\n\n # The game cannot be won unless 5 moves has been played, so when turn has been reduced to 4 moves or less, check win\n if len(turn) <= 4:\n win_condition, win = win_check(table)\n if win_condition == True:\n display_board(table)\n print(f\"The bot won!!!\\nThanks for playing!\")\n retry()\n\n\n print(\"\\nDRAW!\")\n retry()", "def player(board):\n if board == initial_state():\n return X\n\n total_x = 0\n total_o = 0\n\n for i in board:\n total_x += i.count(X)\n total_o += i.count(O)\n\n if (total_x + total_o) % 2 == 1:\n return O\n else:\n return X", "def tic_tac_toe_x(win, p1x, p1y):\r\n for i in range(2):\r\n deltaX = (-1) ** i * (.35)\r\n deltaY = (.35)\r\n line = Line(Point(p1x - deltaX, p1y - deltaY),\r\n Point(p1x + deltaX, p1y + deltaY))\r\n line.setOutline('red')\r\n line.setWidth(5)\r\n line.draw(win)" ]
[ "0.7581382", "0.75775814", "0.74732363", "0.7191872", "0.714979", "0.7070667", "0.7068065", "0.7031623", "0.7028581", "0.6929111", "0.69243205", "0.6854077", "0.6834494", "0.6790149", "0.67888355", "0.6788085", "0.6787451", "0.6786783", "0.6753092", "0.67121536", "0.66924953", "0.66838", "0.66741395", "0.6669672", "0.6666273", "0.6641511", "0.663747", "0.66247827", "0.66171724", "0.66092426", "0.6608021", "0.66076165", "0.6592951", "0.6587085", "0.6582898", "0.6574453", "0.6557697", "0.65569896", "0.65528923", "0.6542954", "0.6523711", "0.6507808", "0.65035033", "0.6494417", "0.648364", "0.6477911", "0.647334", "0.6470685", "0.64693654", "0.64662373", "0.6462067", "0.64474714", "0.64453876", "0.6438313", "0.64370835", "0.64215165", "0.64204943", "0.64088297", "0.63849497", "0.6379246", "0.63789237", "0.63763714", "0.63759875", "0.63739204", "0.63721293", "0.63641155", "0.6363853", "0.6362917", "0.6361804", "0.6361701", "0.63616073", "0.63592786", "0.63576955", "0.6355015", "0.6349555", "0.63353586", "0.6333572", "0.63309866", "0.63271517", "0.63232636", "0.63186157", "0.6317227", "0.63074154", "0.6302047", "0.6291033", "0.6290891", "0.6290546", "0.62902164", "0.6289914", "0.628029", "0.6272627", "0.6271472", "0.6267848", "0.6260707", "0.6255974", "0.62554777", "0.62517524", "0.62495506", "0.6243965", "0.62406015" ]
0.73010886
3
Calculates RSCU values for each codon
def calculate_rscu(handle: str, genetic_code_num: int, min_len_threshold: int = 200, gene_analysis: bool = False, save_file: bool = False, file_name: str = 'RSCU_report', folder_path: str = 'Report') -> \ dict[str, float | dict[str, float]]: records = parse(handle, 'fasta') references = filter_reference(records, min_len_threshold) if gene_analysis: rscu_dict = dict() for i, seq in enumerate(references): rscu_dict.update({f'gene_{i + 1}': RSCU([seq], genetic_code_num)}) if save_file: name = file_name + '.xlsx' make_dir(folder_path) file_path = join(folder_path, name) if is_file_writeable(file_path): df = pd.DataFrame.from_records( [ (gene, codon, rscu_val) for gene, rscu_vals in rscu_dict.items() for codon, rscu_val in rscu_vals.items() ], columns=['Gene', 'Codon', 'RSCU_vals'] ) df.to_excel(file_path, float_format='%.4f', columns=df.columns) print(f'The RSCU score file can be found at: {abspath(file_path)}') else: reference = filter_reference(records, min_len_threshold) rscu_dict = RSCU(reference, genetic_code_num) if save_file: name = file_name + '.xlsx' make_dir(folder_path) file_path = join(folder_path, name) if is_file_writeable(file_path): df = pd.DataFrame.from_records( [ (codon, rscu_val) for codon, rscu_val in rscu_dict.items() ], columns=['Codon', 'RSCU_vals'] ) df.to_excel(file_path, float_format='%.4f', columns=df.columns) print(f'The RSCU score file can be found at: {abspath(file_path)}') return rscu_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SumaryCompras(vj):\n\n vj.CompasCUC = vj.MontoPrecios = vj.GanancPrecios = 0.0\n\n for row in vj.tbCompras.rows.values():\n prec = vj.MD.Convert( row.precio, row.moneda, MD.Cuc ) # Siempre lleva el precio a CUC\n\n vj.MontoPrecios += ( prec * row.count )\n vj.CompasCUC += row.valCUC\n\n UpdateRecupIdx(vj)\n vj.GanancPrecios = vj.MontoPrecios - vj.MontoInvers", "def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe", "def calc_perc_reducts():\n #Load RCP2.6 datq\n cubes = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')\n #Get the surface and high level SO2 emissions\n surf_cube = cubes[3][:,0]\n high_cube = cubes[1][:,0]\n cubes = iris.cube.CubeList([surf_cube,high_cube])\n\n for i in range(0,len(cubes)):\n #Add the year and month to the cube and extract for 2009 onwards\n iris.coord_categorisation.add_year(cubes[i],'time',name='year')\n iris.coord_categorisation.add_month(cubes[i],'time',name='month')\n cubes[i] = cubes[i].extract(iris.Constraint(year = lambda y: y >=2009))\n\n #Make the year-on-year reduction rates\n yoy_rates = []\n for cube in cubes:\n #Calculate the global mean timeseries\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n area_weights = iris.analysis.cartography.area_weights(cube)\n cube_mean = cube.collapsed(['latitude','longitude'],iris.analysis.MEAN,weights=area_weights)\n\n cube_rates = np.ones((cube_mean.shape))\n #Loop over the months and calculate the changes from the previous year\n #Calculate the year on year proportional changes in the global mean\n for i in range(12,cube_mean.shape[0]):\n cube_rates[i] = cube_mean[i].data / cube_mean[(i-12)].data\n\n yoy_rates.append(cube_rates)\n\n return yoy_rates", "def calculate_cci(hunterlab):\n return 1000 * (hunterlab[1]) / (hunterlab[0] * hunterlab[2])", "def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', 
predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0", "def compute_county_cirle(county_population):\n return SCATTER_SCALE * county_population", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def calcCaliCorrandR(constants, corr, data, outName):\n print(constants)\n perr=np.sqrt(np.diag(corr))\n print(perr)\n corrmat=np.zeros([len(constants),len(constants)])\n for i in range(len(corr)):\n for j in range(len(corr)):\n \n ele=corr[i,j]\n diele=ele/(perr[i]*perr[j])\n corrmat[i,j]=round(diele,3)\n print(corrmat)\n #calculate the r^2 value\n ss_res = 0\n ss_total = 0\n residuals = np.zeros([len(data[:,0]), 1])\n for i in range(len(data[:,0])):\n residuals[i] = (LangmuirCurve(data[i,0],constants[0],constants[1],constants[2],constants[3]) - data[i,1])\n ss_res += np.square(residuals[i])\n ss_total += np.square((data[i,1] - np.average(data[:,1])))\n print(ss_res)\n print(ss_total)\n r_sq = 1 - (ss_res/ss_total)\n print(r_sq)\n #write out the fit results\n f = open(outName + \"_cali_constants.txt\", 'w')\n f.write(\"B\\ta\\tN\\tK\\n\")\n for i in range(len(constants)):\n f.write('%.9f' %constants[i] + \"\\t\")\n f.write(\"\\n\\n\")\n for i in range(len(corr)):\n f.write('%.9f' %perr[i] + \"\\t\")\n f.write(\"\\n\\n\")\n f.write(\"Correlation matrix :\\n\\n\")\n for i in range(len(corr)):\n for j in range(len(corr)):\n f.write('%.9f' %corrmat[i,j]+'\\t')\n f.write(\"\\n\\n\")\n f.write(\"R^2 value : \\t\" + '%.9f' %r_sq)\n f.close()", "def sum_crimes(cs:CrimeStatistics)-> int:\n # return 0 # stub\n #template from atomic\n crimes_total = (cs.violent_crimes+cs.property_crimes+cs.arson)\n return crimes_total", "def get_crime_rate(crime):#=d1Data.get_US_crime()\n crimeRates_list = []\n for i in range(0,len(crime)):\n crimeRates = list(crime[i])\n crimeRates[2:] = list(round(100000*crimeRates[j]/crimeRates[1],1) for j in range(2,len(crime[0])))\n crimeRates_list.append(crimeRates)\n return(crimeRates_list)", "def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs", "def runcircos(self):\n pd.read_csv(self.cns, sep=\"\\t\")[\n [\"chromosome\", \"start\", \"end\", \"tcn\"]\n ].rename({\"chromosome\": \"chrm\", \"tcn\": \"cns\"}, axis=1).to_csv(\n self.segs, index=None\n )\n\n passed_svs = [\n sv\n for sv in self.svs.values()\n ]\n circos_sv_file = os.path.join(\n self.out_dir, \"circos_svs.tsv\"\n )\n circos_df = pd.DataFrame(\n [\n (\"chr\" + sv.chr1, sv.pos1, sv.pos1, \"chr\" + sv.chr2, sv.pos2, sv.pos2)\n for sv in passed_svs\n ],\n columns=[\n \"Chromosome\",\n \"chromStart\",\n \"chromEnd\",\n \"Chromosome.1\",\n \"chromStart.1\",\n \"chromEnd.1\",\n ],\n )\n circos_df.to_csv(circos_sv_file, index=None)", "def codonComposition(self):#works\n return {codon: 
self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def get_rcs():\n kk = np.loadtxt(source+\"/kids_data/rcslens2.csv\", delimiter=\",\",\n skiprows=1, max_rows=sample)\n global maxra\n maxra = max(kk[:sample, 0])\n global minra\n minra = min(kk[:sample, 0])\n global maxdec\n maxdec = max(kk[:sample, 1])\n global mindec\n mindec = min(kk[:sample, 1])\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([kk[:sample, 0], kk[:sample, 1]])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n # gamma_shear = -k[:,2]*np.cos\n return ctree, kk[:sample, 2], kk[:sample,\n 3], kk[:sample, 4], kk[:sample, 5]", "def getRNCS(ChargeSA):\n charge=[]\n for i in ChargeSA:\n charge.append(float(i[1]))\n\n temp=[]\n for i in ChargeSA:\n temp.append(i[2])\n\n try:\n RNCG = min(charge)/sum([i for i in charge if i < 0.0])\n return temp[charge.index(min(charge))]/RNCG\n except:\n return 0.0", "def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc", "def calc_cop():\n df = pp.load_csv_file('COP_in.csv', 'metrics_data') \n df = pp.clean_dataframe(df, 5)\n\n df_cop = df['LP01LM01_QQ'] / df['SJ01_SM01']\n df_cop = df_cop.replace(to_replace=np.nan, value = 0, inplace=False)\n \n return df_cop", "def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. 
Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr", "def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0", "def qcd_cc( s, m, r, u ):\n\n l2_min = r\n l3_min = u\n l1_min = (m+1)/2\n l1_max = l2_max = l3_max = (3*s+m+2*r+2*u)/2\n\n S = 0\n for l1 in range(l1_min, l2_max+1):\n for l2 in range(l2_min, l2_max+1):\n for l3 in range(l3_min, l3_max+1):\n n1 = 2*l1 + l2 + l3 - 2*s - m - r - u\n n2_t2 = -2*(l1+l2+l3) + 3*s + m + 2*r + 2*u\n n3 = l2-r\n n4 = l3-u\n if n2_t2%2 != 0:\n continue\n n2 = n2_t2/2\n if n1 < 0 or n2 < 0 or n3 < 0 or n4 < 0:\n continue\n\n denom = factorial(n1)*factorial(n2)*factorial(n3)*factorial(n4)*factorial(3)**n1*factorial(4)**n2*factorial(m)*factorial(r)**2*factorial(u)**2\n\n nom = double_factorial(2*l1-1)*factorial(l2)*factorial(l3)\n S+= Fraction(nom, denom)\n\n return S", "def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val", "def cppi(risky_r, safe_r=None, m=3, start=initial, floor=0.8, riskfree_rate=risk_free_rate, drawdown=None):\n # set up the CPPI parameters\n dates = risky_r.index\n n_steps = len(dates)\n account_value = start\n floor_value = start*floor\n peak = account_value\n if isinstance(risky_r, pd.Series): \n risky_r = pd.DataFrame(risky_r, columns=[\"R\"])\n\n if safe_r is None:\n safe_r = pd.DataFrame().reindex_like(risky_r)\n safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number\n # set up some DataFrames for saving intermediate values\n account_history = pd.DataFrame().reindex_like(risky_r)\n risky_w_history = pd.DataFrame().reindex_like(risky_r)\n cushion_history = pd.DataFrame().reindex_like(risky_r)\n floorval_history = pd.DataFrame().reindex_like(risky_r)\n peak_history = pd.DataFrame().reindex_like(risky_r)\n\n for step in range(n_steps):\n if drawdown is not None:\n peak = np.maximum(peak, account_value)\n floor_value = peak*(1-drawdown)\n cushion = (account_value - floor_value)/account_value\n risky_w = m*cushion\n risky_w = np.minimum(risky_w, 1)\n risky_w = np.maximum(risky_w, 0)\n safe_w = 1-risky_w\n risky_alloc = account_value*risky_w\n safe_alloc = account_value*safe_w\n # recompute the new account value at the end of this step\n account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])\n # save the histories for analysis and plotting\n cushion_history.iloc[step] = cushion\n risky_w_history.iloc[step] = risky_w\n account_history.iloc[step] = account_value\n floorval_history.iloc[step] = floor_value\n peak_history.iloc[step] = peak\n risky_wealth = start*(1+risky_r).cumprod()\n backtest_result = {\n \"Wealth\": account_history,\n \"Risky Wealth\": risky_wealth, \n \"Risk Budget\": cushion_history,\n \"Risky Allocation\": risky_w_history,\n \"m\": m,\n \"start\": start,\n \"floor\": floor,\n \"risky_r\":risky_r,\n \"safe_r\": safe_r,\n \"drawdown\": drawdown,\n \"peak\": peak_history,\n \"floor\": floorval_history\n }\n return backtest_result", "def roc(ground_truth, pred_result):\n assert len(ground_truth)==len(pred_result)\n tp, fp, tn, fn = 1e-8, 1e-8, 1e-8, 1e-8\n for i in range(len(ground_truth)):\n if ground_truth[i][0] == 0 and pred_result[i][0] == 0:\n tp += 1\n elif ground_truth[i][0] == 0 and pred_result[i][0] == 1:\n fn += 1\n elif ground_truth[i][0] == 1 and pred_result[i][0] == 0:\n fp += 1\n elif ground_truth[i][0] == 1 and pred_result[i][0] == 1:\n tn += 1\n roc_tpr, roc_fpr = tp/(tp+fn), fp/(fp+tn)\n 
return (roc_fpr, roc_tpr)", "def get_covrad(elem):\n return covrad[get_num(elem)]", "def apply_per_reducts_cmip6():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Get the year-on-year proportional reductions in RCP2.6\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply year on year proportional reductions (globally uniform) from RCP2.6 in 2015 onwards\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_rcp262015.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ 
\"SO2DMS_rcp262015.nc\")\n\n return", "def uCSIsYiRadicals(code):\n ret = libxml2mod.xmlUCSIsYiRadicals(code)\n return ret", "def calc_ic(data):\n return scs.spearmanr(data[:, 0], data[:, 1]).correlation", "def calc_rocchio(original, relevant_vectors, nonrelevant_vectors):\n print('orig' + str(len(original)))\n if len(relevant_vectors) > 0: print('rv 1st len' + str(len(relevant_vectors[0])))\n if len(nonrelevant_vectors) > 0: print('nr 1st len' + str(len(nonrelevant_vectors[0])))\n rv_count = len(relevant_vectors)\n nr_count = len(nonrelevant_vectors)\n rv_sum = np.add.reduce(relevant_vectors)\n print('rv_sum' + str(rv_sum) + 'rv_count' + str(rv_count))\n nr_sum = np.add.reduce(nonrelevant_vectors)\n print('nr_sum' + str(nr_sum) + 'nr_count' + str(nr_count))\n updated_relevance = cg.ROCCHIO_ALPHA * original \\\n + cg.ROCCHIO_BETA * (1/rv_count if rv_count else 1) * rv_sum \\\n - cg.ROCCHIO_GAMMA * (1/nr_count if nr_count else 1) * nr_sum\n #only keep terms above minimum threshold (also serves to exclude negative values)\n print('before')\n print(updated_relevance[:40])\n updated_relevance = [0 if wgt < cg.ROCCHIO_MIN else wgt for wgt in updated_relevance]\n print('after')\n print(updated_relevance[:40])\n return updated_relevance", "def _c_numeric(self, rij):\n radial_fun = np.zeros((self.lmax+1, self.nmax))\n radial_fun[0,1] = 1.0\n\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb \n for n in range(1, self.nmax+1):\n argbess = 2*alpha*rb[n-1]*rij\n ep = np.exp(-alpha*(rij + rb[n-1])**2)\n em = np.exp(-alpha*(rij - rb[n-1])**2)\n #In the loops below, msb prefix refers to modified spherical bessel.\n for l in range(self.lmax+1):\n if l == 0:\n if argbess == 0.0:\n msb_fi_ki_l = np.exp(-alpha*(rb[n-1]**2 + rij**2))\n else:\n #msb_fi_ki_lm = cosh(arg_bess)/arg_bess\n #msb_fi_ki_l = sinh(arg_bess)/arg_bess\n msb_fi_ki_lm = 0.5 * (em + ep) / argbess\n msb_fi_ki_l = 0.5 * (em - ep) / argbess\n else:\n if argbess == 0.0:\n msb_fi_ki_l = 0.0\n else:\n msb_fi_ki_lmm = msb_fi_ki_lm\n msb_fi_ki_lm = msb_fi_ki_l\n msb_fi_ki_l = msb_fi_ki_lmm-(2*l-1)*msb_fi_ki_lm/argbess\n\n radial_fun[l,n-1] = msb_fi_ki_l #* rb[n-1]\n fc = fcut(rij, self.rcut, self.trans_width)\n return np.dot(radial_fun, self.transformbasis)*fc", "def rsi(date):\n\n # print(float(r_json['Technical Analysis: RSI'][date]['RSI']))\n return float(r_json['Technical Analysis: RSI'][date]['RSI'])", "def csi(self):\n return self.table[0, 0] / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0])", "def elixcomoscore(df,col_icd,col_id):\n output = icdtoelixcomo(df,col_icd)\n output = output.loc[output['ElixComo'].notnull(),:]\n output = output.loc[:,[col_id,'ElixComo','ElixComoScore']]\n output = output.drop_duplicates()\n output = pd.DataFrame(output.groupby(col_id)['ElixComoScore'].sum()).reset_index()\n output = output.merge(df.loc[:,[col_id]].drop_duplicates(),how='outer',left_on=col_id,right_on=col_id).fillna(0.)\n return output", "def scoreCirc_CmosVoltageReference(circuit, gen, indi, makeRedundancyInMatrix): #TODO 6.9.2016 napisi cost function ki se sklada z evaluateCmosVoltageRef\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = 
sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n disfCount = 0\n \n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n 
disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = vdd_s_t1 + vdd_s_t1_d + \\\n\t vdd_s_t2 + vdd_s_t2_d + \\\n\t vdd_s_t3 + vdd_s_t3_d + \\\n\t vdd_s_r1 + vdd_s_r1_d + \\\n\t vdd_s_r2 + vdd_s_r2_d + \\\n\t vdd_s_r3 + vdd_s_r3_d + \\\n\t (100*powe)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def rosenbrock(X):\n sum_r = 0\n for i in range(len(X)-1):\n sum_r += 100*(X[i+1]-X[i]**2)**2 + (1-X[i])**2\n #sum_r += rosenbrock(X[i], X[i+1])\n return sum_r", "def getcorrelation(movieid1,movieid2):\n\n #the initialized integer, cosine_sum, has an initialized value of -100\n #such that in the case where correlation shouldn't be updated, the value\n #remains unchanged\n cosine_sum = NEGATIVE\n #variable r_a,i and r_b,i in the formula\n r_mv1 = 0\n r_mv2 = 0\n #numerator\n nume_sum = 0\n #two parts in the denominator (before taking square root)\n deno_mv1_sum = 0\n deno_mv2_sum = 0\n denominator = 0\n #variable that keeps track of count of common users\n currentCommon = 0\n\n #firstly check if the count of user passes the threshold for each movie\n if(len(dictMovie.get(movieid1))<threshold or\n len(dictMovie.get(movieid2))<threshold):\n #if either does not, returns a negative correlation (to be invalid)\n return cosine_sum\n #if both pass threshold, get the intersection (of users) of two movies\n else:\n intersect=dictMovie.get(movieid1).intersection(dictMovie.get(movieid2))\n #if the number of common users is smaller than threshold, return\n if (len(intersect) < threshold):\n return cosine_sum\n #otherwise, start counting correlation\n else:\n #get the average rating of two movies\n mv1_bar = float(dictMovieRate.get(movieid1))\n mv2_bar = float(dictMovieRate.get(movieid2))\n #iterate through common users and use formula\n for commonuser in intersect:\n #increment common user count\n currentCommon += 1\n r_mv1 = int(dictUser.get(commonuser).get(movieid1))\n r_mv2 = int(dictUser.get(commonuser).get(movieid2))\n nume_sum += ( (r_mv1)-mv1_bar )*( (r_mv2)-mv2_bar )\n deno_mv1_sum += ( (r_mv1)-mv1_bar )**2\n deno_mv2_sum += ( (r_mv2)-mv2_bar )**2\n #when done with denominator separate calculation, combine\n denominator = math.sqrt(deno_mv1_sum * deno_mv2_sum)\n #handle the case where denominator=0 (invalid)\n if denominator == 0:\n return cosine_sum\n #otherwise, successful. 
return valid values and pass in\n #common count to global variable for program to catch\n else:\n cosine_sum = nume_sum / denominator\n global currentCommonCount\n currentCommonCount = currentCommon\n return cosine_sum", "def value(self):\n\t\tRS = 0\n\t\tfor mu in range(self.g.dim()):\n\t\t\tfor nu in range(self.g.dim()):\n\t\t\t\tRS += self.g.uu(mu,nu)*self.Ric.dd(mu,nu)\n\t\treturn RS.simplify()", "def clinopyroxene_98():\n\n rho = 3190.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 237.8; C[0,1] = 83.5; C[0,2] = 80.; C[0,3] = 0.; C[0,4] = 9.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 183.6; C[1,2] = 59.9; C[1,3] = 0.; C[1,4] = 9.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 229.5; C[2,3] = 0.; C[2,4] = 48.1; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 76.5; C[3,4] = 0.; C[3,5] = 8.4\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 73.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 81.6\n\n return C, rho", "def classfy(data, clu, k):\n clulist = cal_dis(data, clu)\n clusterRes = grouping(data, clulist)\n clunew = new_Cen_Cal(data, clusterRes, k)\n err = clunew - clu\n\n return err, clunew, k, clusterRes", "def calculate_correction(filedic):\n lanczos_cera = xr.open_mfdataset(filedic['lanczos(CERA)'], combine='by_coords')\n lanczos_noaa = xr.open_mfdataset(filedic['lanczos(20CR)'], combine='by_coords')\n return lanczos_noaa.drop('number').squeeze() - lanczos_cera.drop('number').squeeze()", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def kro(self, sw:np.ndarray) -> np.ndarray:\n kro0 = self.kro0\n sor = self.sor\n swc = self.swc\n no = self.no\n res = np.zeros_like(sw)\n cond1 = np.logical_and(swc <= sw, sw <= 1 - sor)\n res[cond1] = kro0 * ((1 - sw[cond1] - sor) / (1 - sor - swc)) ** no\n cond2 = np.logical_and(0.0 < sw, sw < swc)\n res[cond2] = 1 + (kro0 - 1) / swc * sw[cond2]\n res[sw > 1 - sor] = 0.0\n res[sw <= 0.0] = 1.0\n return res", "def clinopyroxene_92():\n\n rho = 3327.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 257.3; C[0,1] = 85.9; C[0,2] = 76.2; C[0,3] = 0.; C[0,4] = 7.1; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 216.2; C[1,2] = 71.8; C[1,3] = 0.; C[1,4] = 13.3; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] 
= 260.2; C[2,3] = 0.; C[2,4] = 33.7; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.2; C[3,4] = 0.; C[3,5] = 10.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 70.6; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 85.8\n\n return C, rho", "def getRPSA(ChargeSA):\n temp=0.0\n for i in ChargeSA:\n temp=temp+i[2]\n if temp == 0.0:\n return 0.0\n return getPSA(ChargeSA)/temp", "def ssr(self):\n return (self.resid * self.resid).sum(0)", "def RyserFormula(num_coincidence: int, U_st: numpy.ndarray) -> complex:\n\n value_perm = 0\n set = CreateSubset(num_coincidence)\n\n for subset in set:\n num_elements = len(subset)\n value_times = 1\n for i in range(num_coincidence):\n value_sum = 0\n for j in subset:\n value_sum += U_st[i, j]\n value_times *= value_sum\n value_perm += value_times * (-1) ** num_elements\n value_perm *= (-1) ** num_coincidence\n return value_perm", "def compute_risk(mua,vra,alloc,slack):\n mu = 0\n var = 0\n for sa in alloc :\n mu = mu + mua[sa-1]\n var = var + vra[sa-1]\n dist = stats.norm(loc=mu,scale=np.sqrt(var))\n r = dist.sf(slack)\n return r", "def calc_fpr(self):\n self.fpr = (1 - (1 - 1 / float(self.size)) ** (self.hash_count * self.num_uniq_elems)) ** self.hash_count\n return self.fpr", "def lucas(n):\n\t'''\n\tlucL1=1\n\tlucL2=2\n\tif n<0:\n\t\tprint(\"please enter positive int value\")\n\n\telif n==0:\n\t\treturn 2\n\n\telif n==1:\n\t\treturn 1\n\n\telse:\n\t\tfor i in range(n-1):\n\t\t\tlucC=lucL1+lucL2\n\t\t\tlucL2=lucL1\n\t\t\tlucL1=lucC\n\t\treturn lucC\n\t'''\n\treturn sum_series(n,2,1)", "def en(self, crd):\n en = [0.0, 0.0]\n for co in crd.flatten():\n en[0] += co**2\n en[1] += (co-1)**2\n return en", "def rosenbrock(indiv):\n soma = 0\n for i in range(len(indiv)-1):\n soma+= (1-indiv[i])**2 + 100*(indiv[i+1]-(indiv[i])**2)**2\n return soma", "def CRIT_U(p,tv): \n rho=RHO(p,tv)\n # Note: 144 = 2x 72 N; 0.3 = 0.6 drag coef * surface area 0.5 m**2\n uc=np.power(np.divide(144,np.multiply(rho,0.3)),0.5)\n \n return uc", "def rsi(df, lag):\n\n def avg_gain():\n gains = [\n df[i][\"c\"] - df[i - 1][\"c\"] if df[i][\"c\"] >= df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_gain = [sum(gains[:lag]) / float(lag)]\n [avg_gain.append(((avg_gain[-1] * 13) + gain) / 14.0) for gain in gains[lag:]]\n return avg_gain\n\n def avg_loss():\n losses = [\n abs(df[i][\"c\"] - df[i - 1][\"c\"]) if df[i][\"c\"] < df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_loss = [sum(losses[:lag]) / float(lag)]\n [avg_loss.append(((avg_loss[-1] * 13) + loss) / 14.0) for loss in losses[lag:]]\n return avg_loss\n\n gains = avg_gain()\n losses = avg_loss()\n\n raw_rsi = [\n round(100 - (100 / (1 + (gains[i] / losses[i]))), 2) for i in range(len(gains))\n ]\n df = df[-1 * len(raw_rsi) :]\n\n return [raw_rsi[i] for i in range(len(df))]", "def computeRmse(model, data, n):\n print \"RESULT_data:%s \" % ((data.map(lambda x: (x[0], x[1]))).take(50))\n predictions1 = model.predictAll(data.map(lambda x: (x[0], x[1])))\n print \"RESULT1: %s\" % predictions1\n predictionsAndRatings = predictions1.map(lambda x: ((x[0], x[1]), x[2])) \\\n .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n .values()\n #print \"RESULT2: %s\" % predictions1.take(11)\n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))", "def prect(precc, precl):\n var = precc + precl\n var = convert_units(var, \"mm/day\")\n var.long_name = \"Total 
precipitation rate (convective + large-scale)\"\n return var", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def _calcConsonanceMetric(self, minMajRatio, metricalAccentLevel):\n consonanceScores = []\n\n # step through candidate triads and calculate consonance score\n for triad in self._candidateTriads:\n triadType = triad.getTriadType()\n score = float(self._triadConsonanceScores[triadType])\n\n # modify score based on minMaj ratio\n minMajImpact = self._calcMinMajRatioImpact(minMajRatio,\n metricalAccentLevel, triadType)\n score += minMajImpact\n consonanceScores.append(score)\n return consonanceScores", "def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration", "def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n badSweep = 0\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateVoltageRef(gen, indi)\n disfCount = 0\n \n vdd_sweep = np.array(results['vout_vdd']['nominal'], dtype=float) #This line changes Nones to np.nans\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep)):\n disfCount = disfCount + 1\n vdd_s = 0\n vdd_s_d = 0\n #print \"tukej!\", vdd_sweep_scale\n else:\n x = np.median(vdd_sweep)\n vdd_s = abs(x - VREF) #if x > VREF else 0\n vdd_s_d = np.max(vdd_sweep) - np.min(vdd_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n #print \"tukiii\", vdd_sweep_scale\n if (vdd_sweep_scale[-1]<20): #20V\n\tbadSweep = badSweep + 1\n \n rload_sweep = np.array(results['vout_rload']['nominal'], dtype=float)\n rload_sweep_scale = np.array(results['vout_rload_scale']['nominal'], dtype=float)\n # if measurement is empty\n if np.any(np.isnan(rload_sweep)):\n disfCount = disfCount + 1\n rload_s = 0\n rload_s_d = 0\n else:\n x = np.median(rload_sweep)\n rload_s = abs(x - VREF) #if x > VREF else 0\n rload_s_d = np.max(rload_sweep) - np.min(rload_sweep)\n #if sweep did not finish completely - add to score\n #check last scale value in runme2!!\n if (rload_sweep_scale[-1]<100e3): #100kOhm\n\tbadSweep = badSweep + 1\n \n temp_sweep = np.array(results['vout_temp']['nominal'], dtype=float)\n temp_sweep_scale = 
np.array(results['vout_temp_scale']['nominal'], dtype=float)\n # if measurement is empty OR sweep did not finish completely - check last scale value in runme2!!\n if np.any(np.isnan(temp_sweep)):\n disfCount = disfCount + 1\n temp_s = 0\n temp_s_d = 0\n else:\n x = np.median(temp_sweep)\n temp_s = abs(x - VREF) #if x > VREF else 0\n temp_s_d = np.max(temp_sweep) - np.min(temp_sweep)\n if (temp_sweep_scale[-1]<120): #120 deg celsius\n\tbadSweep = badSweep + 1\n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = (vdd_s) + (vdd_s_d) + 5*(rload_s) + 5*(rload_s_d) + (100*temp_s) + (100*temp_s_d) + (100*powe) + badSweep*100\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, temp_s, temp_s_d, powe\n #print vdd_s, vdd_s_d, rload_s, rload_s_d, 100*temp_s, 100*temp_s_d, 100*powe\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results", "def run_cusum(data, old_values):\n try:\n value = data[\"Value\"]\n lcl = old_values.loc[old_values[\"Kind\"] == \"LCL\", \"Value\"].values[0]\n ucl = old_values.loc[old_values[\"Kind\"] == \"UCL\", \"Value\"].values[0]\n sum_positive = old_values.loc[old_values[\"Kind\"] == \"SP\", \"Value\"].values[0]\n sum_negative = old_values.loc[old_values[\"Kind\"] == \"SN\", \"Value\"].values[0]\n differences_pos = (value - ucl) / (ucl - lcl)\n differences_neg = (lcl - value) / (ucl - lcl)\n sum_positive = max(0, sum_positive + differences_pos.sum())\n sum_negative = max(0, sum_negative + differences_neg.sum())\n result = [\n [\"SP\", sum_positive],\n [\"SN\", sum_negative],\n [\"LCL\", lcl],\n [\"UCL\", ucl],\n ]\n output = pd.DataFrame(result, columns=[\"Kind\", \"Value\"])\n return output\n except:\n raise", "def WakeVorticityFromCirculation_Cont(r_cp,Gamma_cp,R,U0,Omega,bSwirl,method='analytical',bHighThrustCorr=True):\n r_cp = np.asarray(r_cp).ravel()\n Gamma_cp = np.asarray(Gamma_cp).ravel()\n if r_cp[0]==0:\n r_cp[0]=r_cp[1]*0.5;\n # Non dimensional parameters\n k = Omega*Gamma_cp/(np.pi*U0**2)\n vr_bar = r_cp/R \n lambda_r = Omega*r_cp/U0\n # Finding inductions\n a,a_prime,misc= InductionsFromCirculation_VC_Cont(vr_bar,lambda_r,k,bSwirl,method=method,bHighThrustCorr=bHighThrustCorr)\n # Computing convection\n misc['Vz'] = U0*(1-2*a)\n misc['h'] = misc['Vz']*2*np.pi/(Omega*(1+2*a_prime))\n misc['a'] = a\n misc['a_prime'] = a_prime\n misc['Gamma_cp'] = Gamma_cp\n misc['r_cp'] = r_cp\n # Vortex intensities\n Gamma_tilde = Gamma_cp - np.concatenate((Gamma_cp[1:],[0])) #Gamma_tilde = Gamma_i-Gamma_{i+1}\n gamma_t = - Gamma_tilde/misc['h']\n if bSwirl:\n gamma_l = Gamma_tilde/(2*np.pi*r_cp)\n Gamma_r = - Gamma_cp[0]\n else:\n gamma_l = 0\n Gamma_r = 0\n return gamma_t,gamma_l,Gamma_r,misc", "def _transcode_crf(binary: Path, params: str, crf: float) -> Dict[str, float]:\n with tqdm(FILES, leave=False, desc=f'Try CRF={crf}') as paths:\n scores = [_transcode_crf_one(binary, params, crf, p) for p in paths]\n return {k: hmean([score[k] for score in scores]) for k in COLUMNS}", "def CC_CLS_CC_TRS(Dataframe):\n\n Feature_DF = 
Dataframe.loc[:,['HNAME','CC_CLS_CC']]\n min_value = min(Feature_DF.loc[:,'CC_CLS_CC'])\n Feature_DF.loc[:,'CC_CLS_CC_TRS'] = Feature_DF.loc[:,'CC_CLS_CC'].apply(lambda x : (1+x-min_value)**(9/10))\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_CLS_CC_TRS']]\n\n return Feature_DF", "def CRS(site):\n return np.dot(CR(np.pi/2**(site)),SWAP)", "def ls8_sr_corr(img):\n return img.select(['B2'], ['BLUE']).float().multiply(0.8850).add(183).int16()\\\n .addBands(img.select(['B3'], ['GREEN']).float().multiply(0.9317).add(123).int16())\\\n .addBands(img.select(['B4'], ['RED']).float().multiply(0.9372).add(123).int16())\\\n .addBands(img.select(['B5'], ['NIR']).float().multiply(0.8339).add(448).int16())\\\n .addBands(img.select(['B6'], ['SWIR1']).float().multiply(0.8639).add(306).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.9165).add(116).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])", "def lucas(n):\n lucval = sum_series(n, 2, 1)\n print(lucval)\n return lucval", "def calc_rhoenc(mass,r,rmax):\n idx = (r<rmax)\n return mass[idx].sum()/sphvol(rmax)", "def ls5_sr_corr(img):\n return img.select(['B1'], ['BLUE']).float().multiply(0.91996).add(37).int16()\\\n .addBands(img.select(['B2'], ['GREEN']).float().multiply(0.92764).add(84).int16())\\\n .addBands(img.select(['B3'], ['RED']).float().multiply(0.8881).add(98).int16())\\\n .addBands(img.select(['B4'], ['NIR']).float().multiply(0.95057).add(38).int16())\\\n .addBands(img.select(['B5'], ['SWIR1']).float().multiply(0.96525).add(29).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.99601).add(20).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def ci2se(ci):\n\n ci = sorted(ci)\n\n return (ci[1] - ci[0]) / (2 * 1.96)", "def findROICentres(smt_file): #{\n rois = []\n ary = None\n state = 0\n roi_centres = { \n 'head_ct': None,\n 'neck_cb': None,\n 'shoulder_l': None,\n 'shoulder_r': None,\n 'elbow_l': None,\n 'elbow_r': None,\n 'pelvis_c': None,\n 'wrist_l': None,\n 'wrist_r': None,\n 'hip_l': None,\n 'hip_r': None,\n 'knee_l': None,\n 'knee_r': None,\n 'ankle_l': None,\n 'ankle_r': None,\n 'toes_e': None}\n\n vrbMsg(1, 'findROICentres() smt_file = ' + smt_file)\n # Read the smoothed Woolz object and create a NumPy array from it\n err_num, smt_obj = readWoolzObj(smt_file)\n if(not bool(err_num)): #{\n\t err_num, org, ary = wlzObjToNP(smt_obj)\n #}\n sz = np.shape(ary)\n vrbMsg(5, 'findROICentres() object size = ' + str(sz))\n # Work down the scan finding coordinates, ordering has dependency\n # but this is checked in the individual functions\n if((sz[0] >= img_size_min) and (sz[1] >= img_size_min)): #{\n findProfileHeadCT(roi_centres, ary)\n findProfileToes(roi_centres, ary)\n #}\n if(bool(roi_centres['toes_e']) and bool(roi_centres['head_ct'])): #{\n findProfileShoulders(roi_centres, ary)\n findProfilePelvis(roi_centres, ary)\n findProfileHip(roi_centres, ary)\n findProfileElbow(roi_centres, ary)\n 
findProfileWrist(roi_centres, ary)\n findProfileKnee(roi_centres, ary)\n findProfileAnkle(roi_centres, ary)\n rois = roi_centres\n for cen in rois: #{\n # numpy gives int64 which is not always handled (eg by json) so convert\n pos = rois[cen]\n if(not (pos is None)): #{\n rois[cen] = [int(pos[0]), int(pos[1])]\n #}\n #}\n #}\n vrbMsg(1, 'findROICentres() rois = ' + str(rois))\n return rois", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def calculateSNR(self):\n pass", "def SumaryPresupuesto(vj):\n\n sumaUSD = sumaCUC = totalUSD = totalCUC = 0.0\n\n for row in vj.tbPresupesto.rows.values():\n cambio = row.cambio\n moneda = row.moneda\n value = row.value\n\n if moneda == MD.Usd:\n sumaUSD += value\n totalUSD += value\n totalCUC += ( value * cambio )\n else:\n sumaCUC += value\n totalCUC += value\n totalUSD += ( value / cambio )\n\n vj.PresupCuc = totalCUC\n if totalUSD>0 and totalCUC>0: \n vj.MD.SetChange( totalCUC/totalUSD, MD.Usd, MD.Cuc )", "def calcula_variancias(self):\n for index in range(1, self.n_rodadas+1):\n self.var_x1 += (self.x1_med_rodada[index] - self.x1_med_total) ** 2\n self.var_w1 += (self.w1_med_rodada[index] - self.w1_med_total) ** 2\n self.var_nq1 += (self.nq1_med_rodada[index] - self.nq1_med_total) ** 2\n self.var_ns1 += (self.ns1_med_rodada[index] - self.ns1_med_total) ** 2\n self.var_n1 += (self.n1_med_rodada[index] - self.n1_med_total) ** 2\n self.var_t1 += (self.t1_med_rodada[index] - self.t1_med_total) ** 2\n self.var_w1_med += (self.var_w1_med_rodada[index] - self.var_w1_med_total) ** 2\n\n self.var_x2 += (self.x2_med_rodada[index] - self.x2_med_total) ** 2\n self.var_w2 += (self.w2_med_rodada[index] - self.w2_med_total) ** 2\n self.var_nq2 += (self.nq2_med_rodada[index] - self.nq2_med_total) ** 2\n self.var_ns2 += (self.ns2_med_rodada[index] - self.ns2_med_total) ** 2\n self.var_n2 += (self.n2_med_rodada[index] - self.n2_med_total) ** 2\n self.var_t2 += (self.t2_med_rodada[index] - self.t2_med_total) ** 2\n self.var_w2_med += (self.var_w2_med_rodada[index] - self.var_w2_med_total) ** 2\n\n self.var_x1 /= (self.n_rodadas - 1)\n self.var_w1 /= (self.n_rodadas - 1)\n self.var_nq1 /= (self.n_rodadas - 1)\n self.var_ns1 /= (self.n_rodadas - 1)\n self.var_n1 /= (self.n_rodadas - 1)\n self.var_t1 /= (self.n_rodadas - 1)\n self.var_w1_med /= (self.n_rodadas - 1)\n\n self.var_x2 /= (self.n_rodadas - 1)\n self.var_w2 /= (self.n_rodadas - 1)\n self.var_nq2 /= (self.n_rodadas - 1)\n self.var_ns2 /= (self.n_rodadas - 1)\n self.var_n2 /= (self.n_rodadas - 1)\n self.var_t2 /= (self.n_rodadas - 1)\n self.var_w2_med /= (self.n_rodadas - 1)", "def coverage(self, user_list):\n all_recom_set = set()\n all_item = set(self.train['movieId'].values)\n print('\\nCalculated coverage: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n all_recom_set.update(recom_item)\n print('\\nCoverage is: ', len(all_recom_set) / (len(all_item) * 1.0))\n return len(all_recom_set) / (len(all_item) * 1.0)", "def calculate_WR():\n C = df['rating_avg'].mean()\n m = df['rating_count'].quantile(0.9)\n v = df['rating_count']\n R = df['rating_avg']\n return (v/(v+m) * R) + (m/(m+v) * C)", "def standardize_sim_values(self):\n for user_id_A, row in self.sim_matrix.items(): # row is reference\n lA = len(self.users[user_id_A].covered_items)\n for user_id_B in row.keys():\n lB = 
len(self.users[user_id_B].covered_items)\n row[user_id_B] /= sqrt(lA*lB)\n assert row[user_id_B] <= 1", "def _GetConcentrationCorrectionMilliMolar(self):\n # calculate stoichiometric imbalance (how many more products are there\n # compared to substrates). Note that H2O isn't counted\n sum_logs = sum([c.coeff*numpy.log(c.phase.PhysiologicalValue())\n for c in self.reactants])\n \n _r = constants.R\n _t = constants.DEFAULT_TEMP\n return _r * _t * sum_logs", "def kpc_per_arcsec_from(self, redshift: float) -> float:\r\n return 1.0 / self.arcsec_per_kpc_proper(z=redshift).value", "def _find_cusps(self):\n N = self.level()\n s = []\n\n for d in arith.divisors(N):\n w = arith.gcd(d, N//d)\n if w == 1:\n if d == 1:\n s.append(Cusp(1,0))\n elif d == N:\n s.append(Cusp(0,1))\n else:\n s.append(Cusp(1,d))\n else:\n for a in range(1, w):\n if arith.gcd(a, w) == 1:\n while arith.gcd(a, d//w) != 1:\n a += w\n s.append(Cusp(a,d))\n return sorted(s)", "def scar(res):\n\n bcut = res['s2n']> np.percentile(res['s2n'],90)\n x = res['Pcad'][bcut]\n x -= min(x)\n x /= max(x)\n y = (res['t0cad']/res['Pcad'])[bcut]\n\n D = np.vstack([x,y]).T\n tree = cKDTree(D)\n d,i= tree.query(D,k=2)\n return np.percentile(d[:,1],90)", "def cci(self) -> float:\n return self._cci", "def get_chisqrs(prf,diff,nbins): \n off_pulse = np.zeros(39)\n off_pulse[:20] = prf[:20]\n off_pulse[20:] = prf[45:] #Making off pulse region\n # print(\"Off pulse Region \",off_pulse)\n op_rms = np.var(off_pulse) #Rms\n # print(\"Off pulse RMS \",op_rms)\n s = 0\n for d in diff:\n s += d**2/op_rms\n\n s = s/(nbins - 1)\n # print(\"Chisqr value = \",s)\n\n return s", "def computeCRCD3(self, u0, u1, u2, u3, u4, u5, u6, u7):\n tmp = u0\n bs = [u1, u2, u3, u4, u5, u6, u7]\n for val in bs:\n for j in range(16):\n if tmp & (1 << 15):\n tmp <<= 1\n if val & (1 << 15):\n tmp += 1\n tmp ^= 0x8005\n else:\n tmp <<= 1\n if val & (1 << 15):\n tmp += 1\n val <<= 1\n tmp &= 0xffff\n return tmp", "def specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value / focal.cardinal\n return round(result, 6)", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def rS_rhoS_c(self, m, z):\n Rvir = self.U.rVir(m, z)\n # concentration parameter\n #c = 10./(1.+z) * (m / self.m_nonlin)**(-0.2) # from Takada & Jain 2002\n c = 9./(1.+z) * (m / self.m_nonlin)**(-0.13) # Takada & Jain 2003\n # scale radius\n RS = Rvir / c # in Mpc/h\n # normalize the mass within rVir to be mVir\n rhoS = m / (4.*np.pi*RS**3)\n rhoS /= np.log(1.+c) - c/(1.+c) # (Msun/h) / (Mpc/h)^3\n return RS, rhoS, c", "def rS_rhoS_c(self, m, z):\n Rvir = self.U.rVir(m, z)\n # concentration parameter\n #c = 10./(1.+z) * (m / self.m_nonlin)**(-0.2) # from Takada & Jain 2002\n c = 9./(1.+z) * (m / self.m_nonlin)**(-0.13) # Takada & Jain 2003\n # scale radius\n RS = Rvir / c # in Mpc/h\n # normalize the mass within rVir to be mVir\n rhoS = m / (4.*np.pi*RS**3)\n rhoS /= 
np.log(1.+c) - c/(1.+c) # (Msun/h) / (Mpc/h)^3\n return RS, rhoS, c", "def test_rank_centrality():\n for case in iter_testcases('pairwise'):\n n_items = case[\"n_items\"]\n data = case[\"data\"]\n assert np.allclose(\n case[\"rc_est\"], rank_centrality(n_items, data),\n atol=ATOL, rtol=RTOL)", "def compute(self):\n\t\tmontant = self.spn_montant.value() #recuperation de la valeur de la spn\n\t\tdevise_from = self.cbb_devisesFrom.currentText() #recuperation de la valeur de la cbb\n\t\tdevise_to = self.cbb_devisesTo.currentText()\n\t\t\n\t\t# on effectue la conversion grace a currency_converter\n\t\t# on fait une gestion d'erreur pour eviter les conversions non trouvees\n\t\ttry :\n\t\t\t\"\"\"on essaie\"\"\"\n\t\t\tresultat = self.c.convert(montant, devise_from, devise_to)\n\t\t\n\t\texcept currency_converter.currency_converter.RateNotFoundError :\n\t\t\t\"\"\"si erreur\"\"\"\n\t\t\tprint(\"le taux de conversion n'a pas ete trouve\")\n\t\t\n\t\telse :\n\t\t\t\"\"\"si pas d'erreur\"\"\"\n\t\t\tself.spn_montantConverti.setValue(resultat) #affichage dans la cbb", "def calc_vcirc(r,menc,G=1.):\n if G is None: G = 1.\n return np.sqrt(G*menc/r)", "def secross(r=1):\n\n B = sesum(binary([[0,1,0],\n [1,1,1],\n [0,1,0]]),r)\n return B", "def define_cusum(self, data, remove_outliers):\n try:\n value = data[\"Value\"]\n value = self.remove_outliers(value, remove_outliers).reset_index(drop=True)\n result = self.cl_definition(value, 1.5)\n result = result.loc[\n (result[\"Kind\"] == \"UCL\") | (result[\"Kind\"] == \"LCL\"), :\n ]\n result[\"IdMachine\"] = data[\"IdMachine\"][0]\n result[\"IdSig\"] = data[\"IdSig\"][0]\n result[\"TimeStamp\"] = data.tail(1)[\"TimeStamp\"].values[0]\n return result\n except:\n raise", "def ICC(data, min_val, max_val, normalize = True):\n pre = 'ICC'\n log('start',pre=pre)\n log('data',data,pre=pre)\n # 0. normalize the data and create the constants\n new_data = [row[:] for row in data]\n for i,row in enumerate(data):\n for j,val in enumerate(row):\n domain_width = max_val - min_val\n new_val = (val - min_val)/domain_width if normalize else val\n new_data[i][j] = new_val\n log('new_data',new_data,pre=pre)\n num_of_judges = len(new_data[0])\n num_of_targets = len(new_data)\n log('num_of_judges',num_of_judges,pre=pre)\n log('num_of_targets',num_of_targets,pre=pre)\n\n # 1. calculate the means\n # 1.a calculate the row menas\n row_means = [sum(row)/num_of_judges for i,row in enumerate(new_data)]\n log('row_means',row_means,pre=pre)\n # 1.b calculate the column means\n col_means = [sum([row[j] for row in new_data])/num_of_targets for j in range(num_of_judges)]\n log('col_means',col_means,pre=pre)\n # 1.c calculate the total mean\n tot_mean = sum([sum(row) for row in new_data])/(num_of_judges * num_of_targets)\n log('tot_mean',tot_mean,pre=pre)\n\n # 2. do the sum of squares (SS)\n # 2.a row SS\n row_SS = [(val-tot_mean)**2 for val in row_means]\n log('row_SS',row_SS,pre=pre)\n # 2.b col SS\n col_SS = [(val-tot_mean)**2 for val in col_means]\n log('col_SS',col_SS,pre=pre)\n # 2.c all SS\n all_SS = [[(new_data[i][j]-tot_mean)**2 for j in range(num_of_judges)] for i in range(num_of_targets)]\n log('all_SS', all_SS, pre=pre)\n\n # 3. do the aggregated sum of squares\n SSB = sum(row_SS)*num_of_judges\n SSC = sum(col_SS)*num_of_targets\n SST = sum([sum(row) for row in all_SS])\n SSE = SST - SSC - SSB\n log('SSB',SSB,pre=pre)\n log('SSC',SSC,pre=pre)\n log('SST',SST,pre=pre)\n log('SSE',SSE,pre=pre)\n\n # 4. 
incorporate the degrees of freedom\n JMS = SSC/(num_of_judges-1)\n EMS = SSE/((num_of_judges-1) * (num_of_targets-1))\n BMS = SSB/(num_of_targets-1)\n log('JMS',JMS,pre=pre)\n log('EMS',EMS,pre=pre)\n log('BMS',BMS,pre=pre)\n\n # 5. do the final calc\n icc = (BMS-EMS)/BMS\n\n return icc,{'BMS':BMS,'EMS':EMS, 'JMS':JMS}", "def cre_confidence1(df):\r\n func = lambda x: 1 - np.abs(x.mean())\r\n return df.groupby('creline')['ffb_c'].transform(func)", "def compute_coriolis(self):\r\n # compute the Coriolis force\r\n self.coriolis.assign(\r\n project(-2*self.rho*cross(self.omega, self.u), self.V))", "def calc_R(xc, yc):\n return sqrt((x-xc)**2 + (y-yc)**2)", "def CalculateRoc2(dataArray,prefix,readsize,uniquehits,mappedreads,filename):\r\n starttime= time.time()\r\n uniquehits = float(uniquehits)\r\n readsize = float(readsize)\r\n \r\n \r\n entries = len(dataArray)\r\n \r\n\r\n resultmatrix = np.arange(entries*2)\r\n resultmatrix = resultmatrix.reshape(2,entries)\r\n \r\n maxrq = max(x.rq for x in dataArray)\r\n maxnm = max(x.nm[0] for x in dataArray)\r\n maxGaps= max(x.gaps[0] for x in dataArray)\r\n maxMism= max(x.mism[0] for x in dataArray)\r\n \r\n \r\n minrq = min(x.rq for x in dataArray)\r\n minnm = min(x.nm[0] for x in dataArray)\r\n minmq= min(x.mq[0] for x in dataArray)\r\n minGaps= min(x.gaps[0] for x in dataArray) \r\n minMism= min(x.mism[0] for x in dataArray) \r\n \r\n \r\n # adjust stepsize for rq since the score behaves the other way\r\n quants = [1,2,3,4,5]\r\n tempa = maxrq-minrq\r\n stepsize = tempa/5\r\n \r\n rqQuants = [round(minrq+(i-1)*stepsize,3) for i in quants]\r\n rqQuants.reverse()\r\n rqQuants[-1] =0 # last entry is rounded bigger than the smallest in the dataset\r\n \r\n nmQuants = [i*maxnm/5 for i in quants]\r\n GapsQuants = [i*maxGaps/5 for i in quants]\r\n MismQuants = [i*maxMism/5 for i in quants]\r\n\r\n rocvector = []\r\n \r\n # i = NM,l = RQ, k = MQ\r\n for l in quants: # RQ\r\n for k in quants: # GAPS\r\n for j in quants: # MISMATCH\r\n temparray = [m for m in dataArray if m.gaps[0] <= GapsQuants[k-1] and m.mism[0] <= MismQuants[j-1] and m.rq >=rqQuants[l-1]]\r\n \r\n\r\n tempids = [m.id for m in temparray]\r\n uniquereads = {}\r\n for i in xrange(0,len(tempids)):\r\n uniquereads[tempids[i]] = \"\"\r\n\r\n mappedreads = len(uniquereads)\r\n \r\n \r\n \r\n templength = len(temparray)\r\n \r\n if templength == 0:\r\n continue\r\n else:\r\n tempTP = sum(x.mr[0] for x in temparray)\r\n tempFP =templength-tempTP\r\n F = round((float(mappedreads)/ readsize) ,3)\r\n sens = round((tempTP/ uniquehits) * F,3)\r\n if tempFP == 0:\r\n spec = 0\r\n else:\r\n spec = round((tempFP / uniquehits) * F,3) \r\n \r\n rocvector.append([rqQuants[l-1],GapsQuants[k-1],MismQuants[j-1],tempTP,tempFP,templength,sens,spec,F])\r\n \r\n #print (\"%d\\t%d\\t%d\\t\" % (templength,tempTP,tempFP))\r\n\r\n #0 = NM 4 = TP 7 = sens\r\n #1 = RQ 5 = FP 8 = 1-spec\r\n #2 = GAPS 6 = P 9 = F\r\n #append needed for last entry in AUC calculation\r\n rocvector.append([0,0,0,0,0,0,0,0,0]) \r\n nproc = np.array(rocvector)\r\n \r\n #write the sens and specificity values from nproc according to the enumeration in line 149. \r\n #specificity is in cell -2\r\n # sensitivity is in cell -3\r\n sens = [i[-3] for i in nproc]\r\n spez = [i[-2] for i in nproc]\r\n \r\n # adjust ROC curve. 
It is necessary that it the 1-specificity ends in 1.\r\n # for the last record copy the predecessor in sens to it\r\n # and write 1 to specificity \r\n spez[-1] = 1\r\n sens[-1] = sens[-2]\r\n \r\n\r\n rocarray1 = np.array([sens,spez])\r\n rocarray1 = rocarray1.flatten('F')\r\n rocarray1= rocarray1.reshape((len(spez),2))\r\n \r\n rocarray = np.array([sens,spez])\r\n rocarray = rocarray.flatten('F')\r\n rocarray = rocarray.reshape((len(spez),2))\r\n rocarray = np.sort(rocarray.view('float,float'), order=['f0','f1'], axis=0).view(np.float)\r\n \r\n rocarrayCorrected = rocarray\r\n \r\n #print rocarrayCorrected\r\n # project points where...\r\n for m in range(len(rocarrayCorrected)-2,-1,-1):\r\n if (rocarrayCorrected[m,1] >= rocarrayCorrected[m+1,1]):\r\n rocarrayCorrected[m,1] = rocarrayCorrected[m+1,1]\r\n\r\n \r\n #print rocarrayCorrected \r\n plt.hold(True)\r\n plt.figure()\r\n plt.subplot(111)\r\n #plt.scatter(spez, sens, c='b', marker='o', facecolor='red')\r\n #plt.plot(rocarray[:,1], rocarray[:,0]\r\n plt.plot(rocarrayCorrected[:,1],rocarrayCorrected[:,0], marker='o', markersize=7,linestyle='--', color='r', label='projected')\r\n plt.plot(rocarray1[:,1], rocarray1[:,0], linestyle=\"None\",label='real',marker='.',color='g')\r\n plt.xlabel('1-specificity')\r\n plt.ylabel('sensitivity')\r\n plt.title(r'ROC:'+filename)\r\n plt.axis([-0.1,1.1,-0.1,1.1])\r\n plt.grid(True)\r\n plt.legend(loc='lower right')\r\n plt.tight_layout()\r\n plt.savefig(prefix + \"_ROC.pdf\",format='pdf')\r\n plt.clf \r\n \r\n \r\n AUC = trapezoidal_rule(rocarrayCorrected[:,1], rocarrayCorrected[:,0])\r\n \r\n fobj = open(prefix+\"_roctable.txt\",\"w\")\r\n fobj.write(\"RQ\\tGAPS\\tMM\\tPTP\\tFP\\tP\\tSn\\t1-Sp\\tF\\r\\n\")\r\n for i in xrange(0,len(rocvector),1):\r\n temp = [str(k) for k in rocvector[i]]\r\n tempstr = \"\\t\".join(temp)\r\n fobj.write(tempstr+\"\\r\\n\")\r\n\r\n endtime= time.time()\r\n return(round(AUC,3))", "def reduce(self, cres):\n return cres._reduce()" ]
[ "0.56749076", "0.5645466", "0.5644362", "0.5638017", "0.56055605", "0.55989563", "0.5504229", "0.54844487", "0.54743993", "0.54574186", "0.54527915", "0.5400491", "0.53917223", "0.53822356", "0.53790843", "0.5347198", "0.5308095", "0.530323", "0.5293776", "0.5282645", "0.5274876", "0.5268127", "0.5245522", "0.5244178", "0.52299285", "0.52232295", "0.5221863", "0.5220932", "0.52107495", "0.51973003", "0.51948005", "0.5186856", "0.51864547", "0.51548254", "0.51440763", "0.5141997", "0.5140054", "0.5135315", "0.5128611", "0.5127393", "0.5123805", "0.51206404", "0.51203465", "0.5111314", "0.5107559", "0.5102078", "0.5093727", "0.5088324", "0.50858426", "0.5073408", "0.50661993", "0.506304", "0.5061039", "0.5055079", "0.50525457", "0.5051452", "0.5050524", "0.50461435", "0.50378895", "0.50354284", "0.50317454", "0.50246984", "0.50232524", "0.5015766", "0.5010184", "0.5010159", "0.50084984", "0.5006461", "0.50055414", "0.5002599", "0.50014913", "0.50008625", "0.49996784", "0.4998364", "0.49960837", "0.499333", "0.49924707", "0.49922922", "0.4991559", "0.498466", "0.49827012", "0.4975619", "0.49744362", "0.4963229", "0.4963173", "0.496311", "0.4959313", "0.4954981", "0.4954981", "0.4953967", "0.49538493", "0.49531418", "0.4951367", "0.49495715", "0.4947317", "0.49430203", "0.49398413", "0.4939678", "0.4938678", "0.49363592" ]
0.58999944
0
The id of a node is assigned after topological sort, in reversed topological order. (The root has id 0.) Use the Python object id if this node has not been assigned an id.
def id_str(self):
    if hasattr(self, 'id'):
        return str(self.id)
    else:
        return 'obj%s' % id(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_id(self) -> int:\r\n return self._node_id", "def tree_id(self):\n if self.is_root:\n return 0\n elif self._link is not None:\n return self._link.tree_id\n else:\n return self._tree_id", "def readjust_node_id(self, lowerbound = 1):\n for i in range(lowerbound, len(self.nodes)):\n if self.nodes[i]:\n self.nodes[i].node_id = i", "def init_id(root: TreeNode):\n current_id = [0]\n init_id_helper(root, current_id)\n return current_id[0]", "def node_id(self) -> int:\n return self.data[\"nodeId\"]", "def node_id(self) -> int:\n return self.data[\"nodeId\"]", "def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')", "def _determine_index(self, id):\n\n return bisect.bisect_left(self._max_node_ids, id)", "def object_id(o):\n return id(o)", "def min_node_id(self) -> int:\n\n if len(self.nodes) > 0:\n min_node_id = min([min(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])\n else:\n min_node_id = 0\n\n return min_node_id", "def _nextId(cls, id=None):\n if (not hasattr(DAG, \"_lastID\")):\n DAG._lastID = 0\n if (id):\n DAG._lastId = id\n DAG._lastID = DAG._lastID + 1\n return DAG._lastID", "def max_node_id(self) -> int:\n\n if len(self.nodes) > 0:\n max_node_id = max([max(nodes.id) if len(nodes) > 0 else 0 for nodes in self.nodes])\n else:\n max_node_id = 0\n\n return max_node_id", "def node_rev(self):\n return self.grid.edges['nodes'][self.j, self.orient]", "def _reset_node_ids(self, start_id: int):\n\n add_id = start_id - self.min_node_id()\n for tree_idx, _ in enumerate(self.nodes):\n self.nodes[tree_idx].nodes['id'] += add_id\n self.edges[tree_idx] += add_id", "def __str__(self):\n\n if self.head != None:\n return (str(self.id)+\" (\"+str(self.tail.id)+\", \"+\n str(self.head.id)+\")\")\n else:\n return str(self.id)+\" (\"+str(self.tail.id)+\", -1)\"", "def obj_id(self) -> int:\n return int(self.index.split(\"/\")[-1]) if self.index else None", "def establish_id(self):\n if self.config.node_id is None:\n self.config.node_id = str(uuid4()).replace('-', '')\n return self.config.node_id", "def id(obj):\n return obj", "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def get_node_id(objects, nodes, edges, node_id, edge_id):\n nodes = nodes.copy()\n if not isinstance(node_id, str):\n nodes[\"mm_noid\"] = node_id\n node_id = \"mm_noid\"\n\n results_list = []\n for row in tqdm(\n objects[[edge_id, objects._geometry_column_name]].itertuples(),\n total=objects.shape[0],\n ):\n if np.isnan(row[1]):\n\n results_list.append(np.nan)\n else:\n centroid = row[2].centroid\n edge = edges.loc[edges[edge_id] == row[1]].iloc[0]\n startID = edge.node_start\n start = nodes.loc[nodes[node_id] == startID].iloc[0].geometry\n sd = centroid.distance(start)\n endID = edge.node_end\n end = nodes.loc[nodes[node_id] == 
endID].iloc[0].geometry\n ed = centroid.distance(end)\n if sd > ed:\n results_list.append(endID)\n else:\n results_list.append(startID)\n\n series = pd.Series(results_list, index=objects.index)\n return series", "def getNodeId(self):\n if self.cursor:\n return self.cursor.nodeId\n return None", "def init_id_helper(node: TreeNode, current_id: List[int]) -> None:\n node.id = current_id[0]\n current_id[0] += 1\n if not isinstance(node, TreeNode):\n return\n init_id_helper(node.left, current_id)\n init_id_helper(node.right, current_id)", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def _nodeNumToId(self, num):\n if num == BROADCAST_NUM:\n return BROADCAST_ADDR\n\n try:\n return self.nodesByNum[num][\"user\"][\"id\"]\n except:\n logging.warn(\"Node not found for fromId\")\n return None", "def _id(self):\n pass", "def __init__(self, node_id):\n # Assign ID and update class-counter\n self.id = node_id\n\n # Initialize\n self.is_sequence_end = False\n self.children = {}", "def next_node_id(self) -> int:\n i = 1\n while True:\n if i not in self.session.nodes:\n break\n i += 1\n return i", "def get_node_by_id(self, id):\r\n for n in self.nodes:\r\n if n.id==id:\r\n return n\r\n return None", "def get_edge_id(self):\n ident = self.eid\n self.eid += 1\n return ident", "def _auto_name(self):\n return \"node_\"+str(self._id)", "def topological_sort(self):\n \n visited = set()\n sorted_node = [] \n\n # sort all the node in the graph\n for i in self.node_set: \n if i not in visited: \n visited = self.topological_sort_helper(i, visited, sorted_node) \n \n visited.clear()\n return sorted_node", "def layer_parent_nid(self, layer_id):\n ...", "def central_node_id(self):\n if self._central_node_id is None:\n return self.nodes[0]\n else:\n return self._central_node_id", "def sort_id(self):\n return self.sort('id')", "def get_ordered_ids(tree):\n ordered_ids = []\n ordered_ids.extend(id(node) for node in tree.gen_tips())\n ordered_ids.extend(id(node) for node in tree.gen_internal_nodes())\n return ordered_ids", "def id(self):\n return self.__pairs[-1][1]", "def dumpid(tree, **kw):\n return hq[(_fuggle_3bf1b522_f487_4386_9466_62308ef1f105(unparse(tree) + ' ~~> ' + str(ast[tree])), ast[tree])[1]]", "def tree_id(self, value):\n self._tree_id = value", "def ion_node_id(self):\n return self._ion_node_id", "def __init__(self, head_vert):\n self.id = head_vert\n self.adjacent_vert_dict = {} # Nodes pointed to by the current node\n self.pointed_to_vert = {}\n self.pointed_vert_dict = {}", "def identifier(cls):\r\n\r\n logger = utils.get_logger()\r\n\r\n # FIXME: this is temporary warning\r\n info = get_node_info(cls)\r\n ident = None\r\n\r\n if info:\r\n ident = info.get(\"name\")\r\n\r\n if not ident:\r\n ident = utils.to_identifier(utils.decamelize(cls.__name__))\r\n if ident.endswith(\"_node\"):\r\n ident = ident[:-5]\r\n\r\n return ident", "def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False", "def topological_sort(self):\n\t\t#detect leaves\n\t\tnumChildren = dict((n.name,0) for n in self.variables.values())\n\t\tfor n in self.variables.itervalues():\n\t\t\tfor p in n.parents:\n\t\t\t numChildren[p]+=1\n\t\t#do a BFS from leaves to get the reverse topological sort\n\t\ttopo = []\n\t\tqueue = [n for (n,c) in numChildren.iteritems() if c==0]\n\t\tif len(queue)==0:\n\t\t\traise ValueError(\"Bayes net is not acyclic?\")\n\t\twhile len(queue)>0:\n\t\t\tn = 
self.variables[queue.pop(0)]\n\t\t\ttopo.append(n)\n\t\t\tfor p in n.parents:\n assert numChildren[p]>0\n numChildren[p] -= 1\n if numChildren[p]==0:\n queue.append(p)\n\t\t#now reverse it to get the top down ordering\n assert len(topo)==len(self.variables)\n\t\treturn reversed(topo)", "def _root(self, ind):\n while (ind != self._id[ind]):\n #make every other node in path to point to its grandparent\n self._id[ind] = self._id[self._id[ind]]\n ind = self._id[ind]\n return ind", "def id(self):\n _id = super(ScheduleVisit, self).id\n return _id + 1", "def _position_to_id(self, x, y):\n return x + y * self.n", "def id(self):\n\t\treturn self.__id", "def get_node_id(node):\n node_id = re.search(r\"[\\d]+\", str(node))\n return node_id.group()", "def map_to_parent_nid(self, nid):\n ...", "def full_id(schema_obj):\n\n return '0x%08x' % ((schema_obj.parent.number << 16) | schema_obj.number)", "def min_tree_id(self) -> int:\n\n return min(self.tree_ids) if len(self.tree_ids)>0 else 0", "def __getIdHash(self, val):\n return PublicTransit.TANA_NODE_NUMBER_OFFSET + int((\"%1.0f\" % val)[7:])", "def id(self) -> int:\n\t\t# pylint: disable=invalid-name\n\t\treturn self._oid", "def test_id(self):\n node = Node()\n node.id = \"1234\"\n self.assertEqual(node.getId(), node.id)", "def getnode(self, id: int) -> node_data:\n return self.Nodes[id]", "def toroomid(the_id):\r\n if the_id < 100000000:\r\n the_id = the_id + 100000000\r\n return the_id", "def createTopologicalList(self):\n sortedList = list(self.node.items())\n sortedList.sort(key=lambda item : item[1].order)\n self.topologicalList = [i[0] for i in sortedList]\n \n # Add dummy element, since topological order starts at 1.\n self.topologicalList = [utils.NO_PATH_EXISTS] + self.topologicalList", "def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True", "def setNodeIdFromTitle(self, node_id):\n calcEngine = CalcEngine.factory(self.client_session)\n new_id = calcEngine.setNodeIdFromTitle(node_id)\n return new_id[\"node_id\"]", "def subtree_identity(self):\n if self.children:\n temp = [(str(edge), str(node.id), \"1\" if node.is_sequence_end else \"0\")\n for (edge, node) in self.children.items()]\n edge_string, id_string, status_string = \\\n zip(*temp)\n else:\n edge_string = id_string = status_string = \"\"\n\n # Join with underscores\n edge_string = \"r_\" + \"_\".join(edge_string)\n id_string = \"r_\" + \"_\".join(id_string)\n status_string = (\"1_\" if self.is_sequence_end else \"0_\") + \"_\".join(status_string)\n\n return edge_string, id_string, status_string", "def record_node(self, elements: frozenset) -> int:\n logging.debug('get node id from elements %s', str(elements))\n if elements not in self.elems2node:\n logging.debug('get node id from elements %s. new node! 
%s', str(elements), self.next_id)\n logging.debug('Clusters =%s ', str(self.clusters))\n self.elems2node[elements] = self.next_id\n self.clusters[self.next_id] = elements\n if len(elements)>1:\n # print('element in elements=', [element for element in elements])\n # print(\"momentum =\", np.asarray([self.momentum[frozenset({elem})] for elem in elements]))\n self.momentum[elements]= sum(np.asarray([self.momentum[frozenset({elem})] for elem in elements])) # Add the momentum of the leaves that compose the node\n # self.invariant_mass[self.next_id] =\n # elif len(elements)==1:\n # self.momentum[elements]= self.leaves_momentum[list(elements)[0]]\n\n self.next_id += 1\n return self.next_id - 1\n else:\n return self.elems2node[elements]", "def getID():", "def computed_id(o):\n\n if o.id is not None and o.id.startswith(namespace + \":\"):\n return o.id\n\n return \"{i.namespace}:{i.accession}\".format(i=computed_identifier(o))", "def __init__(self):\n self.id = id(self)", "def id(self):\n return self.__id", "def identity(self):\n return self.id", "def associate_node_id(tr, node=\"\"):\n return {\"id\": tr.get_uml_id(name=node)}", "def del_node (self, id):\n raise NotImplementedError", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id" ]
[ "0.64536446", "0.63846886", "0.60527354", "0.59763664", "0.59402263", "0.59402263", "0.5815118", "0.58136594", "0.58034563", "0.5800961", "0.5798843", "0.57527506", "0.5713669", "0.56929845", "0.56770647", "0.56754446", "0.5672043", "0.5670681", "0.56547654", "0.56452453", "0.5641452", "0.5640187", "0.56352615", "0.55637836", "0.5560429", "0.55455047", "0.55236757", "0.55186933", "0.5515146", "0.55059063", "0.54977083", "0.54805505", "0.5449281", "0.54373735", "0.5421782", "0.54076505", "0.53968525", "0.5392923", "0.5368891", "0.5327549", "0.53081036", "0.5299506", "0.5295915", "0.5289978", "0.52888227", "0.5282739", "0.5277322", "0.52773154", "0.5276332", "0.5274862", "0.5269363", "0.52641517", "0.52555317", "0.52514005", "0.5250099", "0.52488506", "0.5245498", "0.5231537", "0.52282965", "0.5226544", "0.5224015", "0.52233976", "0.52144766", "0.52042", "0.5194077", "0.5187431", "0.51850855", "0.5182777", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247", "0.5172247" ]
0.0
-1
Returns label used in dot representation.
def dot_label(self, detailed=True):
    if detailed:
        return '%s: %s' % (self.id_str(), escape_quote(str(self)))
    else:
        return '%s' % self.id_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_label(cls) -> str:\n return cls._meta.label_lower.split('.')[-1]", "def label(self):\n return self._label_shape", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def dot(self):\n return self.__dot", "def dot(self) -> str:\n dot = to_pydot(self._graph)\n return dot.to_string()", "def label(self) -> str:\n return self._underlying.label", "def label(self):\n return self._label_", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def _get_label(self):\n return self.label", "def label(self):\n return self.label_", "def label(self) -> str:\n return self[\"label\"]", "def label(self):\n return self.__label", "def label(self):\n return self.__label", "def get_label(self):\n return self.label", "def get_label(self):\n return self.label", "def get_label(self):\n return self.label", "def get_label(self):\n return self.label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def label(self):\n return self._label", "def to_label(self):\n return self.label", "def dot_label(self) -> str:\n rows = \"\\n\".join(field.dot_row() for field in self.fields)\n return _table_template.format(name=self.name, rows=rows).replace(\"\\n\", \"\")", "def get_label(self):\n return self._label", "def label(self):\r\n return self._label", "def GetLabel(self):\n \n return self.label_str", "def dot(self, name):\n nodes = \" \".join(\"_%s_%s;\" % (x, name) for x in self)\n edges = \" \".join(\n '_%s_%s -> _%s_%s [label=\"%.2f/%s\"];'\n % (s, name, t, name, self.get_score(s, t), self.get_label(s, t))\n for s, t in self.iteredges()\n )\n return \"digraph _%s {%s %s}\" % (name, nodes, edges)", "def get_label ( self ):\n return self.label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def dot_name(number):\n\tif number > 0:\n\t\treturn \"P {}\".format(number)\n\telse:\n\t\treturn \"O {}\".format(-number)", "def label(tree):\n return tree[0]", "def GetLabel(self):\r\n\r\n return self.label", "def GetLabel(self):\r\n\r\n return self.label", "def label(self) -> str:\r\n\r\n return self.__label", "def getLabel(self):\n return _libsbml.GeneProduct_getLabel(self)", "def dot_string(self) -> str:\n\n ret = \"{0}[label = \\\"{1}\\\"];\\n\".format(self._index, self.label)\n ret += \"{0}--{{\".format(self._index)\n ret += ''.join([f'{child.index} ' for child in self._children])\n ret += \"}\\n\"\n self._is_drawn = True\n ret += ''.join([child.dot_string() for child in self._children])\n\n return ret", "def get_label(self):\n\n return self._label", "def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()", "def get_label(cls):\n return 
cls._type_name(cls.label)", "def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))", "def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))", "def get_label(cls):\r\n return cls._type_name(cls.label)", "def label(self):\r\n return self._name", "def get_computed_label(self, element):\n pass", "def GetLabel(self) -> str:\n return self._label", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def GetLabel(self):\r\n\r\n return self._label", "def _get_label(obj):\n # NOTE: BarContainer and StemContainer are instances of tuple\n while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:\n obj = obj[-1]\n label = getattr(obj, 'get_label', lambda: None)()\n return label if label and label[:1] != '_' else None", "def label(self):\r\n if isinstance(self.Lbeta, str):\r\n result = self.Lbeta\r\n else:\r\n result = 'T%.2d' % int(round(self.Lbeta))\r\n result += 'E%.2d' % int(round(self.E))\r\n result += 'G%.2d' % int(round(self.minTauG))\r\n result += self.insulation\r\n return result", "def Label(self) -> str:", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def labelName(self):\n if self.isRequired:\n return '%s*' % self.name\n return self.name", "def label_to_name(self, label):\n return self.labels[label]", "def label(self, decimals=None, base_label=None, cache=None):\n return super().label(decimals=decimals, base_label=base_label or \"∏_ϕ\", cache=cache)", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def get_label_name(self):\n command_type = self.get_type()\n if command_type == LABEL_COMMAND_TYPE:\n return self.command[1:-1] # ignores the () at the beginning and the end\n if command_type != EMPTY_COMMAND_TYPE: # increments the line number if it is not a blank line or a label\n self.__line_number += 1", "def label(self) -> str:\n return self.__parameters.label", "def label_to_name(self, label):\n\t\t\treturn self.labels[label]", "def label_to_name(self, label):\n\t\treturn self.labels[label]", "def node_label(self):\n if (self.body in operators):\n return self.body.__name__\n else:\n return str(self.body)", "def label_to_name(label):\n return \"Tree\"", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def getLabel(self):\n return self.content[:12]", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def Label(self, default=None):\n return self.data.get('label', default)", "def Label(self, default=None):\n return self.data.get('label', default)", "def name_to_label(self, name):\n return self.classes[name]", "def label(self):\r\n return self._text", "def label(self):\n return self.address.label", "def label(self) -> str: # Enforcing every node defines a label\n pass", "def labelit(self, varname):\n \n if not varname:\n return \"\"\n return self.vardict[varname].VariableLabel or varname", "def N_dot_dot(self):\n return self._N_dot_dot_attr", "def 
genLabel(self):\n self._nextlabelid += 1\n return CLABEL(self._nextlabelid)", "def get_label(self):\n oshape = (ctypes.c_uint * 2)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetLabel(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)", "def __repr__(self):\n\n return self.get_label()", "def get_label(self):\n return openmediavault.string.unescape_blank(\n self.get_udev_property('ID_FS_LABEL_ENC', '')\n )", "def getLabel(self):\n result = self.content[:12]\n if result == \"\":\n if self.tags:\n result = str(self.tags.first)\n return result", "def label(self):\n return ''", "def get_label(self, key):\n return self.labels.get(key, None)", "def label(self):\n\n return self.identifier", "def _edgeLabel(self, node, parent):\r\n return self.word[node.idx + parent.depth: node.idx + node.depth]" ]
[ "0.7191946", "0.7056918", "0.70269096", "0.6999598", "0.6973575", "0.6969061", "0.690872", "0.6877721", "0.68696594", "0.68696594", "0.68696594", "0.6843672", "0.6832188", "0.682814", "0.68096936", "0.68096936", "0.680629", "0.680629", "0.680629", "0.680629", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6804637", "0.6796726", "0.6783609", "0.67789364", "0.6775348", "0.6771417", "0.67293453", "0.67283607", "0.6710832", "0.6710832", "0.6710832", "0.6710832", "0.6710832", "0.6710832", "0.6710832", "0.6682251", "0.667742", "0.66684663", "0.66684663", "0.6667652", "0.6667634", "0.6643357", "0.6632761", "0.6631366", "0.66201", "0.6611373", "0.6611373", "0.6604765", "0.66029257", "0.6588504", "0.65651244", "0.65648323", "0.65639454", "0.6554752", "0.6546156", "0.65309125", "0.65301734", "0.6527105", "0.65058947", "0.65001035", "0.6488848", "0.6484583", "0.6476494", "0.6469858", "0.6464033", "0.6459779", "0.6409319", "0.64081883", "0.6403301", "0.63920915", "0.6388103", "0.6388103", "0.6388103", "0.6388103", "0.63855743", "0.63855743", "0.6380479", "0.6379326", "0.63718045", "0.63687533", "0.6362516", "0.6348926", "0.6335625", "0.6333581", "0.63291085", "0.6324943", "0.6318655", "0.6314296", "0.6310464", "0.6305268", "0.6301463" ]
0.6813281
14
dot language representation of this node and its incoming edges
def dot(self, color='', detailed=True):
    result = 'n%s [label="%s" style="filled" color="%s"];\n' % \
        (self.id_str(), self.dot_label(detailed=detailed), color)
    # write hyperedges
    for i, edge in enumerate(self.incoming):
        edgename = 'e%s_%s' % (self.id_str(), i)
        # graph node for hyperedge
        result += '%s [shape="point"]\n' % edgename
        # hyperedge head
        result += '%s -> n%s [label="%s"]\n' % \
            (edgename, edge.head.id_str(), escape_quote(str(edge)) if detailed else '')
        # hyperedge tails
        for tailnode in edge.tail:
            result += 'n%s -> %s [dir="none"]\n' % \
                (tailnode.id_str(), edgename)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_dot_str(self) -> Text:\n s = []\n s.append(\"digraph {\")\n for node in self.nodes:\n label = str(node)\n if node in self.start_nodes:\n label += \"S\"\n if node in self.accept_nodes:\n label += \"A\"\n s.append(f' \"{node}\" [label=\"{label}\"];')\n s.append(\"\")\n for from_node, transitions in self.nodes.items():\n for transition, to_nodes in transitions.items():\n if not transition:\n transition = \"&epsilon;\"\n for to_node in to_nodes:\n s.append(f' \"{from_node}\" -> \"{to_node}\" [label=\"{transition}\"];')\n s.append(\"}\")\n return \"\\n\".join(s)", "def dot(self) -> str:\n dot = to_pydot(self._graph)\n return dot.to_string()", "def __repr__(self):\n s = f\"GraphViaEdges(name={repr(self.name)}, \"\n s += f\"edges={repr(self.edges)})\"\n\n return s", "def _to_dot(self, detailed=False):\n g = ast_to_labeled_graph(self, detailed)\n import tulip.graphics as _graphics\n return _graphics.networkx_to_graphviz(g)", "def __repr__(self):\n return repr((self.head_vertex, self.tail_vertex, self.weight))", "def __repr__(self):\n return repr((self.head_vertex, self.tail_vertex, self.weight))", "def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)", "def __repr__(self):\n return repr((self.head_vertex, self.tail_vertex))", "def to_dot(self, name='BDD'): # pragma: no cover\n\t\t# print(\"to_dot\")\n\t\tparts = ['graph', name, '{']\n\t\tfor node in self.dfs_postorder():\n\t\t\tif node is BDDNODEZERO:\n\t\t\t\tparts += ['n' + str(id(node)), '[label=0,shape=box];']\n\t\t\telif node is BDDNODEONE:\n\t\t\t\tparts += ['n' + str(id(node)), '[label=1,shape=box];']\n\t\t\telse:\n\t\t\t\tv = _VARS[node.root]\n\t\t\t\tparts.append('n' + str(id(node)))\n\t\t\t\tparts.append('[label=\"{}\",shape=circle];'.format(v))\n\t\tfor node in self.dfs_postorder():\n\t\t\tif node is not BDDNODEZERO and node is not BDDNODEONE:\n\t\t\t\tparts += ['n' + str(id(node)), '--',\n\t\t\t\t\t\t 'n' + str(id(node.lo)),\n\t\t\t\t\t\t '[label=0,style=dashed];']\n\t\t\t\tparts += ['n' + str(id(node)), '--',\n\t\t\t\t\t\t 'n' + str(id(node.hi)),\n\t\t\t\t\t\t '[label=1];']\n\t\tparts.append('}')\n\t\treturn \" \".join(parts)", "def __repr__(self):\n s = [\"{} vertices, {} edges\\n\".format(self._V, self._E)]\n for v in range(self._V):\n s.append(\"%d : \" % (v))\n for w in self._adj[v]:\n s.append(\"%d \" % (w))\n s.append(\"\\n\")\n\n return \"\".join(s)", "def dot(self, name):\n nodes = \" \".join(\"_%s_%s;\" % (x, name) for x in self)\n edges = \" \".join(\n '_%s_%s -> _%s_%s [label=\"%.2f/%s\"];'\n % (s, name, t, name, self.get_score(s, t), self.get_label(s, t))\n for s, t in self.iteredges()\n )\n return \"digraph _%s {%s %s}\" % (name, nodes, edges)", "def __str__(self):\n s = f\"GraphViaEdges '{self.name}',\\nedges :\\n\"\n for edge, edgetype in self.edges.items():\n s += f\" {edge[0]} {edgetype.value} {edge[1]}\\n\"\n\n return s", "def gen_graph(self):", "def __repr__(self):\n return 'Edge(%s, %s)' % (repr(self[0]), repr(self[1]))", "def __repr__(self):\n return str(self.nodes)", "def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')", "def __repr__(self) -> str:\n if self._has_direction:\n return (\n f\"<Edge: from {self._start} to {self._end} \"\n f\"with label '{self._label}'>\"\n )\n return (\n f\"<Edge: between {self._start} and {self._end} \"\n f\"with label '{self._label}'>\"\n 
)", "def generate_graph_dot(self, dot_path):\n\n dot_str = []\n visited_cells = set()\n queue = self.sources[:]\n\n # BFS through graph\n while len(queue) > 0:\n cell = queue.pop(0)\n if cell in visited_cells:\n continue\n visited_cells.add(cell)\n\n nexts = cell.get_nexts()\n queue.extend(nexts)\n \n # Encode cell\n nexts_names = [next.name for next in nexts]\n nexts_str = \"\\t%s -> { %s }\\n\" % (cell.name, \" \".join(nexts_names))\n dot_str.append(nexts_str) \n\n # Write encoded graph to file\n dot_str = \"strict digraph {\\n\" + \"\".join(dot_str) + \"}\"\n try:\n with open(dot_path, 'w') as dot_file:\n dot_file.write(dot_str)\n except:\n raise EnvironmentError(\"Unable to open %s\" % (dot_path))", "def __repr__(self):\n return 'words: %s, keys: %s' % (self.words, self.nodes.keys())", "def getEdges(self):\n # for node in graph,\n # return node -> node for j in graph[node]\n\n return [\"->\".join([str(n1), str(n2)]) for n1 in self.graph.keys() for n2 in self.graph[n1]]", "def dump_graph(self):\n # TODO\n return", "def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))", "def print_model_graph(self, name=None, agent=([], [], [])):\n dot = pygraphviz.AGraph(directed=\"True\")\n for outp in list(self.outputs.keys()):\n dot.add_node(outp, pos=(outp[1:] + \",10\"), color=\"red\", label=outp + \", \" + str(self.outputs[outp].taking.size) + \"-\" + self.outputs[outp].taking.type)\n for inp in list(self.inputs.keys()):\n dot.add_node(inp, pos=(inp[1:] + \",0\"), color=\"blue\", label=inp + \", \" + str(self.inputs[inp].producing.size) + \"-\" + self.inputs[inp].producing.type)\n for comp in list(self.networks.keys()):\n dot.add_node(comp, label=comp + \"-\" + str(type(self.networks[comp].descriptor).__name__)[:-14] + \":\" + str(self.networks[comp].taking.size) + \"-\" + str(self.networks[comp].producing.size))\n\n for c in self.connections:\n con = self.connections[c]\n if self.conn_in_agent(con, agent[0]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"blue\")\n elif self.conn_in_agent(con, agent[1]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"red\")\n elif self.conn_in_agent(con, agent[2]):\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"green\")\n else:\n dot.add_edge(con.input, con.output, label=str(con.name) + \": \" + str(con.info.size) + \" \" + self.comp_by_ind(con.input).producing.type, color=\"black\")\n dot.layout('dot')\n if not name:\n name = str(hash(self))\n dot.draw(name + '.pdf')", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += 
'\\n\\t'\n return s", "def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string", "def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string", "def __str__(self):\n\t\treturn str(self.graph)", "def __str__(self):\n return np.array2string(self.graph.toarray())", "def __repr__(self):\n return \"{}: {}\".format(self.nodeid, self.lemma)", "def show_edges(self):\n for element in self.graph:\n print(element, self.graph[element])", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def __repr__(self) -> str:\n return f\"Graph: |V|= {self.v_size()}, |E|= {self.e_size()}\"", "def __repr__(self):\n return 'Vertex(%s)' % repr(self.label)", "def __repr__(self):\n return 'Vertex(%s)' % repr(self.label)", "def graph(self):\n ...", "def graph(g):\n return str(g.adjacencyList())", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Node Name: \" + str(self.name) + \"\\n\" + \\\n \"Node Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"Incident Edges: \" + \"\\n\".join([edge.__str__() for edge in self.incident_edges]) + \"\\n\"", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Node Name: \" + str(self.name) + \"\\n\" + \\\n \"Node Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"Incident Edges: \" + \"\\n\".join([edge.__str__() for edge in self.incident_edges]) + \"\\n\"", "def gremlin(self):\r\n initial = '{} = g.makeType().name(\"{}\").{}{}makeEdgeLabel()'\r\n primary_key = ''\r\n if self.primary_key:\r\n primary_key = \"primaryKey({}).\".format(self.primary_key)\r\n\r\n functional = \"functional().\" if self.functional else \"\"\r\n\r\n return initial.format(self.label, self.label, primary_key, functional)", "def edge(self, viz_edge: VizEdge) -> None:\n # Take CallNode as an example, instead of \"arguments point to CallNode\",\n # we want \"CallNode points to arguments\" in ast-dump form.\n #\n # The direction of edge is typically controlled by the implemented VizParser.\n # Reverse start/end here simply because we leverage default parser implementation.\n if viz_edge.end in self._graph:\n self._graph[viz_edge.end].append(viz_edge.start)\n else:\n self._graph[viz_edge.end] = [viz_edge.start]", "def build_graph(self):\n pass", "def get(self):\n self.network = gt.load_graph(self.dotfile)\n\n if self.strongcomponent:\n self.network=gt.extract_largest_component(\n self.network, directed=True, prune=True)\n\n if self.removeselfloops:\n gt.remove_self_loops(self.network)\n\n self.nm = self.network.new_vertex_property(\"string\")\n nm2 = self.network.new_vertex_property(\"string\")\n self.hl = self.network.new_vertex_property(\"bool\")\n self.network.vertex_properties[\"text\"] = self.nm\n self.network.vertex_properties[\"text\"] = nm2\n names=[]\n for v in self.network.vertices():\n if v.out_degree() > -1:\n self.nm[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.hl[v]=False\n else:\n nm2[v]=self.short_name(\n self.network.vp.vertex_name[v],self.preflen)\n self.nm[v]=''\n self.hl[v]=False\n names=names+[nm2[v]]\n\n NAMES=pd.Series(list(set(names)),\n 
name='varclass').reset_index().set_index('varclass')\n self.varclass = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"varclass\"] = self.varclass\n for v in self.network.vertices():\n self.varclass[v]=NAMES.loc[nm2[v]].values[0]\n\n self.od = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.od\n for v in self.network.vertices():\n self.od[v]=self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=self.exponent)+5\n self.ods = self.network.new_vertex_property(\"float\")\n self.network.vertex_properties[\"size\"] = self.ods\n for v in self.network.vertices():\n self.ods[v]=1*self.f(v.out_degree(),\n A=self.minsize,\n E=self.exponentialscaling,\n exponent=1)+2\n\n self.ew = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight\"] = self.ew\n for e in self.network.edges():\n self.ew[e]=float(self.network.ep.weight[e])**1\n\n self.ew_pen = self.network.new_edge_property(\"float\")\n self.network.edge_properties[\"eweight_pen\"] = self.ew_pen\n for e in self.network.edges():\n self.ew_pen[e]=4/(1 + np.exp(-.05-np.fabs(float(self.network.ep.weight[e]))))\n\n self.e_marker = self.network.new_edge_property(\"string\")\n self.network.edge_properties[\"e_marker\"] = self.e_marker\n for e in self.network.edges():\n if float(self.network.ep.weight[e]) < 0:\n self.e_marker[e]='bar'\n else:\n self.e_marker[e]='arrow'\n\n self.deg = self.network.degree_property_map(\"out\")\n\n self.ecol = self.network.new_edge_property(\"vector<double>\")\n self.network.edge_properties[\"ecol\"] = self.ecol\n for e in self.network.edges():\n col=cm.ScalarMappable(mpl.colors.Normalize(vmin=-self.edgecollim,\n vmax=self.edgecollim),\n cmap=self.edgecolmap).to_rgba(float(self.ew[e]))\n col=list(col)\n col[3]=self.edgealpha\n self.ecol[e]=tuple(col)\n\n self.pos = gt.graphviz_draw(self.network,\n overlap=False,\n vsize=20,\n sep=self.nodesep,\n output=None)\n\n self.control = self.network.new_edge_property(\"vector<double>\")\n for e in self.network.edges():\n d = np.sqrt(np.sum((self.pos[e.source()].a\n - self.pos[e.target()].a) ** 2))\n d=d/2\n self.control[e] = [0.0,0.0,0, .2*d, 0.5, d,1,0]\n\n if self.outfile is not None:\n gt.graph_draw(self.network,nodesfirst=False,\n pos=self.pos,\n vertex_halo=self.hl,\n vertex_halo_color=[.2,.2,.2,.1],\n edge_pen_width=self.ew_pen,\n edge_end_marker=self.e_marker,\n vorder=self.deg,\n edge_marker_size=10,\n vertex_color=self.varclass,#[.5,.5,.5,.3],\n edge_color=self.ecol,#[.5,.5,.5,.5],\n vertex_pen_width=1.5,\n vertex_size=self.od,\n vertex_text=self.nm,\n vcmap=(self.cmap,self.alpha),\n edge_control_points=self.control,\n vertex_fill_color=self.varclass,#deg,\n vertex_font_size=self.ods,\n vertex_text_color=[.1,.1,.1,.8],\n #vertex_text_position=0,\n output=self.outfile)", "def __repr__(self):\n\n nodes = []\n current = self.head\n\n while current:\n if current is self.head:\n nodes.append('[Head: %s]' % current.data)\n elif current.next_node is None:\n nodes.append('[Tail: %s]' % current.data)\n else:\n nodes.append('[%s]' % current.data)\n current = current.next_node\n\n return '-> '.join(nodes)", "def __repr__(self: 'DotTree') -> str:\n return 'DotTree({}, {})'.format(repr(self.children[0]), \n repr(self.children[1]))", "def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n 
self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]", "def graph(self, edge_labels='words_in_out'):\n if edge_labels == 'words_in_out':\n label_fct = lambda t:t._in_out_label_()\n elif hasattr(edge_labels, '__call__'):\n label_fct = edge_labels\n else:\n raise TypeError('Wrong argument for edge_labels.')\n\n graph_data = []\n isolated_vertices = []\n for state in self.iter_states():\n transitions = state.transitions\n if len(transitions) == 0:\n isolated_vertices.append(state.label())\n for t in transitions:\n graph_data.append((t.from_state.label(), t.to_state.label(),\n label_fct(t)))\n\n G = sage.graphs.digraph.DiGraph(graph_data, multiedges=True, loops=True)\n G.add_vertices(isolated_vertices)\n return G", "def create_dot(nodes, assocs, hierarchy):\n def field_names(fields):\n return ' | '.join(sorted(fields))\n out = StringIO()\n print >> out, \"digraph phemi_class_diagram {\"\n print >> out, \" node[shape=record];\"\n for clazz, fields in nodes.iteritems():\n print >> out, ' \"%s\" [label=\"{%s | %s}\"];' % (\n fullname(clazz), clazz.__name__, field_names(fields)\n )\n for edgemap in [assocs, hierarchy]:\n for clazz, edges in edgemap.iteritems():\n for edge in edges:\n print >> out, ' \"%s\" -> \"%s\" %s' % (\n fullname(clazz), fullname(edge.dst), edge.style\n )\n print >> out, \"}\"\n return out.getvalue()", "def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)", "def render(self): # pragma: no cover\n from graphviz import Digraph\n dot = Digraph(name=\"top\")\n for block in self.blocks:\n if isinstance(block, Branch):\n label = \"if \" + astor.to_source(block.cond)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"invhouse\"})\n elif isinstance(block, Yield):\n label = astor.to_source(block.value)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"oval\"})\n elif isinstance(block, BasicBlock):\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n elif isinstance(block, HeadBlock):\n label = \"Initial\"\n dot.node(str(id(block)) + \"_start\", label.rstrip(), {\"shape\": \"doublecircle\"})\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.initial_statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n dot.edge(str(id(block)) + \"_start\", str(id(block)))\n 
else:\n raise NotImplementedError(type(block))\n # for source, sink, label in self.edges:\n for sink, label in block.outgoing_edges:\n dot.edge(str(id(block)), str(id(sink)), label)\n\n\n file_name = tempfile.mktemp(\"gv\")\n dot.render(file_name, view=True)\n # with open(\"cfg.dot\", \"w\") as file:\n # file.write(dot.source)\n # exit()", "def _get_dot(self):\n if self.left is not None:\n yield \"\\t%s -> %s;\" % (self.val, self.left.val)\n for i in self.left._get_dot():\n yield i\n elif self.right is not None:\n r = random.randint(0, 1e9)\n yield \"\\tnull%s [shape=point];\" % r\n yield \"\\t%s -> null%s;\" % (self.val, r)\n if self.right is not None:\n yield \"\\t%s -> %s;\" % (self.val, self.right.val)\n for i in self.right._get_dot():\n yield i\n elif self.left is not None:\n r = random.randint(0, 1e9)\n yield \"\\tnull%s [shape=point];\" % r\n yield \"\\t%s -> null%s;\" % (self.val, r)", "def _get_dot(self):\n if self.left is not None:\n yield \"\\t%s -> %s;\" % (self.val, self.left.val)\n for i in self.left._get_dot():\n yield i\n elif self.right is not None:\n r = random.randint(0, 1e9)\n yield \"\\tnull%s [shape=point];\" % r\n yield \"\\t%s -> null%s;\" % (self.val, r)\n if self.right is not None:\n yield \"\\t%s -> %s;\" % (self.val, self.right.val)\n for i in self.right._get_dot():\n yield i\n elif self.left is not None:\n r = random.randint(0, 1e9)\n yield \"\\tnull%s [shape=point];\" % r\n yield \"\\t%s -> null%s;\" % (self.val, r)", "def build_graphviz(input_dim, output_dim, num_intermediate, \n connections, activations, activation_labels):\n \n if not is_valid_adjacency_matrix(connections, num_intermediate, input_dim, output_dim):\n raise ValueError(\"Connectivity matrix is invalid\")\n num_emitting = num_intermediate + input_dim\n num_receiving = num_intermediate + output_dim\n size = num_emitting + output_dim\n dag = graphviz.Digraph()\n #add nodes labeled by activation functions\n for i in range(size):\n node=str(i)\n if i < input_dim:\n label = \"input %d\" % i\n attrs = {}\n else:\n act_index = activations[i-input_dim].item()\n act_label = activation_labels[act_index]\n attrs = {\n 'activation_index': str(act_index),\n 'activation_label': str(act_label)\n } \n if i >= num_emitting:\n label = f\"output {i-num_emitting}\"\n else:\n label = None\n\n dag.node(node, label=label, **attrs)\n #add edges\n edgelist = []\n for i in range(num_receiving):\n rec_index = i + input_dim\n for emitting_index in range(min(rec_index, num_emitting)):\n if connections[i, emitting_index] > 0:\n edgelist.append((str(emitting_index), str(rec_index)))\n dag.edges(edgelist)\n act_mapping = {str(i) : activation_labels[i] for i in range(len(activation_labels))}\n dag.attr(**act_mapping)\n return dag", "def compute_edge_logits(self):", "def __repr__(self):\n return f\"EdgeType.{self.name}\"", "def pretty_print_equation(self):\n\n for n in self.nodes:\n # Get a list of tuples, first is the v\n parents = self.adj_inv[n]\n if len(parents) == 0:\n if self.binary:\n right_side = '{0,1}'\n else:\n right_side = 'N(0, 1)'\n else:\n right_side = ' + '.join(['{:.3f}*x_{}'.format(self.weights[i, n], i)\n for i in parents])\n \n right_side.replace('+ -', '-')\n print('x_{} = {}'.format(n, right_side))", "def _repr_(self):\n return 'A vertex at ' + repr(self.vector());", "def __str__(self):\n return self.__id__() + \" || \" + str(self.__node_a.name) + \" -> \" + str(self.__node_b.name)", "def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = 
nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)", "def __repr__(self):\n string = \"ROOT: {}\\n\".format(self.root)\n string += \"roots: \"+\", \".join(str(x) for x in self.roots())+'\\n'\n for node in self:\n string += str(node)\n if node.incoming:\n string += \" ({})\\n\".format(\" \".join(str(x[1].nodeid) for x in node.incoming))\n else:\n string += \" (root)\\n\"\n for label, child in sorted(node.outgoing, key=lambda x:x[1].nodeid):\n string += \" {} {}\".format(child.nodeid, label) + '\\n'\n string += \"undirected:\\n\"\n for pair in self.undirected:\n string += \" {}\\n\".format(pair)\n return string", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Edge Weight: \" + str(self.weight) + \"\\n\" + \\\n \"Edge Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"First Incident Node: \\n\" + str(self.first_incident_node.get_name()) + \"\\n\" + \\\n \"Second Incident Node: \\n\" + str(self.second_incident_node.get_name()) + \"\\n\"", "def DotNode(self, node):\n if type(node) is dag.Node:\n node = self._graph.NodeInfo(node)\n color = self._ContentTypeToColor(node.ContentType())\n if node.Request():\n max_age = node.Request().MaxAge()\n shape = 'polygon' if max_age > 300 else 'oval'\n else:\n shape = 'doubleoctagon'\n styles = ['filled']\n if node.IsAd() or node.IsTracking():\n styles += ['bold', 'diagonals']\n return ('%d [label = \"%s\\\\n%.2f->%.2f (%.2f)\"; style = \"%s\"; '\n 'fillcolor = %s; shape = %s];\\n'\n % (node.Index(), node.ShortName(),\n node.StartTime() - self._global_start,\n node.EndTime() - self._global_start,\n node.EndTime() - node.StartTime(),\n ','.join(styles), color, shape))", "def _repr_(self):\n return 'A vertex at ' + repr(self._representation_vector);", "def dot(self):\n d = Digraph(comment=\"VP Tree\", format=\"png\")\n for parent, left, right in self.root.preorder():\n\n if isinstance(parent,VPTreeNonLeaf):\n d.node(str(parent.uid), \"\"\"VP Node:: Key={} Median Dist = {:2.2f}\n \"\"\".format(parent.pk, parent.median_dist))\n d.edge(str(parent.uid), str(left.uid))\n d.edge(str(parent.uid), str(right.uid))\n elif isinstance(parent,VPTreeLeaf):\n d.node(str(parent.uid), \"Leaf Node:: \"+str(parent.pk_list))\n else:\n raise Exception(\"something went wrong\")\n\n return d", "def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"", "def __str__(self):\n output = \"\"\n for v, neighbors in self.neighbors.items():\n neighbors = {u: self.weights[(v, u)] for u in neighbors}\n output += str(v) + \" -> \" + str(neighbors) + \"\\n\"\n return output", "def _build_graph(self):\n pass", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def __repr__(self):\n if 
hasattr(self,'f'):\n return \"<Node: f=%d, depth=%d, h=%d\\n%s>\" % (self.f,\n self.depth,\n self.h,\n self.state)\n else:\n return \"<Node: depth=%d\\n%s>\" % (self.depth,self.state)", "def __repr__(self):\n has_weights = any(True for k in self._weights.keys())\n \n ret = ''\n for key, value in sorted(self._edges.items()):\n ret += ' : '.join([repr(key), repr(value)])\n \n if has_weights:\n fs = frozenset\n ret += ' ' + repr([self._weights[fs((key, v))] for v in value])\n ret += '\\n'\n \n return ret.strip()", "def __str__(self):\n return \" {north} \\n{west} {east}\\n {south} \".format(**self.edges._asdict())", "def read_dot_file(dot_file_path):\n nodes = []\n edges = []\n with open(dot_file_path) as f:\n in_lines = f.readlines()\n for line in in_lines:\n # ignore arrow attributes\n line = line.split(sep=\"[\")[0]\n if \"->\" in line:\n split_list = line.split(sep=\"->\")\n # print(\"ffgg\", split_list)\n pa = split_list[0].strip()\n if pa not in nodes:\n nodes.append(pa)\n ch_list = split_list[1].split(\",\")\n ch_list = [x.strip().strip(\";\").strip() for x in ch_list]\n # print(\"ffgg\", pa)\n # print(\"ffgg\", ch_list)\n for ch in ch_list:\n edges.append((pa, ch))\n if ch not in nodes:\n nodes.append(ch)\n\n return nodes, edges", "def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))", "def get_dot(self):\n return \"digraph G{\\n%s}\" % (\"\" if self.val is None else (\n \"\\t%s;\\n%s\\n\" % (\n self.val,\n \"\\n\".join(self._get_dot())\n )\n ))", "def __repr__(self):\n\n return f\"<Node {self.data}>\"", "def __repr__(self):\n return 'Node({!r})'.format(self.data)", "def __str__(self):\n return \"->\".join([str(n.data) for n in self.as_list()])", "def __str__(self):\n s = \"--\\n\"\n for node in self:\n s += node.__str__() + \"\\n\"\n return s + \"--\"", "def __str__(self):\n out = [f'{v}: {self.adj_list[v]}' for v in self.adj_list]\n out = '\\n '.join(out)\n if len(out) < 70:\n out = out.replace('\\n ', ', ')\n return f'GRAPH: {{{out}}}'\n return f'GRAPH: {{\\n {out}}}'", "def __str__(self):\n return '{}({})'.format(self.__class__.__name__, dict(self._graph))", "def __str__(self):\n vList = []\n for vertex in self:\n vList.append(vertex.name)\n gStr = \"The DiGraph contains _vertices: {0}\".format(\" \".join(vList))\n return gStr", "def print(self):\n for i, v in enumerate(self._adj):\n if v:\n print(\"vertex {0}\".format(i))\n for e in v:\n print(e)\n print()", "def __repr__(self):\n return str((self.original, self.left, self.right))", "def build_graph(self):\n raise NotImplementedError", "def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'", "def plot(self):\n return self.graph(edge_labels='words_in_out').plot()", "def dot_format(out, graph, name=\"digraph\"):\n\n out.write(\"digraph %s {\\n\" % name)\n for step, deps in each_step(graph):\n for dep in deps:\n out.write(\" \\\"%s\\\" -> \\\"%s\\\";\\n\" % (step, dep))\n\n out.write(\"}\\n\")", "def __repr__(self):\r\n if self.node_leaf():\r\n return self.name\r\n return str(self.freq) + \" 0: [\" + str(self.zero_son) + \"]\" + \" 1: [\" + str(self.one_son) + \"]\"", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/27')", "def out_edges(self) -> 
List[str]:\n return list(self.proto.out_edges)", "def PrintGraph(self):\n # print(\"Graph has {} nodes and {} edges.\".format(Node.count, Edge.count))\n # print(\"Unique connected nodes:\")\n # for (a, b) in self.connections:\n # print(\"{},{}\".format(a.index, b.index))\n\n # print(f\"\\nAll edges : {[e.index for e in self.edges]}\")\n\n # print(\"\\nDegree of nodes\")\n\n # for node in self.nodes:\n # print(f\"D of {node.index} = {len(node.neighbours)}\")\n\n for node in self.nodes:\n print(\"{}. ({}, {})\".format(node.index, node.x, node.y))", "def make_edge_text(self):\n fmtstr = ub.codeblock(\n '''\n connect from {oport_abs_name}\n to {iport_abs_name}\n ''')\n parts = []\n for iport in self.iports.values():\n for oport in iport.connections:\n if oport is not None:\n part = fmtstr.format(\n oport_abs_name=oport.absname(),\n iport_abs_name=iport.absname(),\n )\n parts.append(part)\n text = '\\n'.join(parts)\n return text", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/26')", "def dot(self):\n return self.__dot", "def edgesFromVertex(u):\r\n edgeRepresentation = lambda v: f\"({u}, {v}, {self.getCapacity((u, v))}, {self.getFlow((u,v))})\"\r\n return \", \".join(map(edgeRepresentation, sorted(self.adjacent[u])))", "def __str__(self):\n name_str = \"node name is %s\\n\" % self.__name\n label_str = \"labels are %s\\n\" % str(self.__labels)\n propety_str = \"properties are %s\\n\" % str(self.__props)\n return name_str + label_str + propety_str", "def make_architecture_pydot_graph(layers, output_shape=True, fullinfo=True):\n import pydot\n node_dict = {}\n edge_list = []\n\n REMOVE_BATCH_SIZE = True\n\n alias_map = {\n 'Conv2DCCLayer': 'Conv',\n 'MaxPool2DCCLayer': 'MaxPool',\n 'LeakyRectify': 'LRU',\n 'InputLayer': 'Input',\n 'DropoutLayer': 'Dropout',\n 'FlattenLayer': 'Flatten',\n }\n\n def get_hex_color(layer_type):\n if 'Input' in layer_type:\n return '#A2CECE'\n if 'Conv' in layer_type:\n return '#7C9ABB'\n if 'Dense' in layer_type:\n return '#6CCF8D'\n if 'Pool' in layer_type:\n return '#9D9DD2'\n else:\n return '#{0:x}'.format(hash(layer_type + 'salt') % 2 ** 24)\n\n for i, layer in enumerate(layers):\n lines = []\n layer_type = '{0}'.format(layer.__class__.__name__)\n layer_type = alias_map.get(layer_type, layer_type)\n key = repr(layer)\n color = get_hex_color(layer_type)\n # Make label\n lines.append(layer_type)\n if fullinfo:\n attr = 'name'\n val = getattr(layer, attr, None)\n if val is not None:\n if len(val) < 3:\n lines[-1] += ' ({0})'.format(val)\n else:\n if val.lower() != layer_type.lower():\n # add name if it is relevant\n lines.append('{0}: {1}'.format(attr, val))\n\n for attr in ['num_filters', 'num_units', 'ds', 'axis'\n 'filter_shape', 'stride', 'strides', 'p']:\n val = getattr(layer, attr, None)\n if val is not None:\n lines.append('{0}: {1}'.format(attr, val))\n\n attr = 'shape'\n if hasattr(layer, attr):\n val = getattr(layer, attr)\n shape = val[1:] if REMOVE_BATCH_SIZE else val\n lines.append('{0}: {1}'.format(attr, shape))\n\n if hasattr(layer, 'nonlinearity'):\n try:\n val = layer.nonlinearity.__name__\n except AttributeError:\n val = layer.nonlinearity.__class__.__name__\n val = alias_map.get(val, val)\n lines.append('nonlinearity:\\n{0}'.format(val))\n\n if output_shape:\n outshape = layer.output_shape\n if REMOVE_BATCH_SIZE:\n outshape = outshape[1:]\n lines.append('Output shape:\\n{0}'.format(outshape))\n\n label = '\\n'.join(lines)\n # append node\n\n node_dict[key] = dict(name=key, label=label, shape='record',\n 
fillcolor=color, style='filled',)\n\n if hasattr(layer, 'input_layers'):\n for input_layer in layer.input_layers:\n edge_list.append([repr(input_layer), key])\n\n if hasattr(layer, 'input_layer'):\n edge_list.append([repr(layer.input_layer), key])\n\n #ut.embed()\n if ut.get_argflag('--nx-cnn-hack'):\n import networkx as nx\n import plottool as pt\n from matplotlib import offsetbox\n #import TextArea, AnnotationBbox\n #import matplotlib.offsetbox # NOQA\n import matplotlib as mpl\n\n #from pylab import rcParams\n #rcParams['figure.figsize'] = 20, 2\n\n #fig = pt.figure(figsize=(10, 2))\n #pt.plt.figure(figsize=(20, 2))\n\n mpl.offsetbox = offsetbox\n nx = nx\n G = netx_graph = nx.DiGraph()\n netx_nodes = [(key_, ut.delete_keys(node.copy(), ['name']))\n for key_, node in node_dict.items()]\n\n netx_edges = [(key1, key2, {}) for key1, key2 in edge_list]\n netx_graph.add_nodes_from(netx_nodes)\n netx_graph.add_edges_from(netx_edges)\n\n #netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR'\n netx_graph.graph.setdefault('graph', {})['rankdir'] = 'TB'\n #netx_graph.graph.setdefault('graph', {})['prog'] = 'dot'\n netx_graph.graph.setdefault('graph', {})['prog'] = 'dot'\n\n pos_dict = nx.nx_pydot.pydot_layout(netx_graph, prog='dot')\n # hack to expand sizes\n #pos_dict = {key: (val[0] * 40, val[1] * 40) for key, val in pos_dict.items()}\n node_key_list = ut.get_list_column(netx_nodes, 0)\n pos_list = ut.dict_take(pos_dict, node_key_list)\n\n artist_list = []\n offset_box_list = []\n\n ax = pt.gca()\n ax.cla()\n nx.draw(netx_graph, pos=pos_dict, ax=ax)\n for pos_, node in zip(pos_list, netx_nodes):\n x, y = pos_\n node_attr = node[1]\n textprops = {\n 'horizontalalignment': 'center',\n }\n offset_box = mpl.offsetbox.TextArea(node_attr['label'], textprops)\n artist = mpl.offsetbox.AnnotationBbox(\n offset_box, (x, y), xybox=(-0., 0.),\n xycoords='data', boxcoords=\"offset points\",\n pad=0.25, framewidth=True, bboxprops=dict(fc=node_attr['fillcolor']),\n #pad=0.1,\n #framewidth=False,\n )\n offset_box_list.append(offset_box)\n artist_list.append(artist)\n\n for artist in artist_list:\n ax.add_artist(artist)\n\n xmin, ymin = np.array(pos_list).min(axis=0)\n xmax, ymax = np.array(pos_list).max(axis=0)\n ax.set_xlim((xmin, xmax))\n\n fig = pt.gcf()\n fig.canvas.draw()\n #fig.set_size_inches(10, 3)\n\n #pt.update()\n\n # Superhack for centered text\n # Fix bug in\n # /usr/local/lib/python2.7/dist-packages/matplotlib/offsetbox.py\n # /usr/local/lib/python2.7/dist-packages/matplotlib/text.py\n for offset_box in offset_box_list:\n offset_box.set_offset\n #offset_box.get_offset\n #self = offset_box\n z = offset_box._text.get_window_extent()\n (z.x1 - z.x0) / 2\n offset_box._text\n T = offset_box._text.get_transform()\n A = mpl.transforms.Affine2D()\n A.clear()\n A.translate((z.x1 - z.x0) / 2, 0)\n offset_box._text.set_transform(T + A)\n\n #pt.update()\n #pt.draw()\n\n # MEGA HACK\n ut.show_if_requested()\n\n #nx.draw(netx_graph, pos=pos_dict, ax=ax, with_labels=True)\n #nx.draw_networkx(netx_graph, pos=pos_dict, ax=ax, node_shape='box')\n #pos_dict = nx.nx_agraph.graphviz_layout(netx_graph)\n # http://stackoverflow.com/questions/20885986/how-to-add-dots-graph-attribute-into-final-dot-output\n #for key, node in netx_nodes:\n # #node['labels'] = {'lbl': node['label']}\n # node['color'] = node['fillcolor']\n\n #from matplotlib.offsetbox import OffsetImage, AnnotationBbox\n if False:\n nx.get_node_attributes(netx_graph, key_)\n\n A = nx.to_pydot(G)\n #A.draw('color.png')\n print(A.to_string())\n 
#rankdir\n\n Z = nx.from_pydot(A)\n\n #nx.draw(Z)\n Zpos = nx.nx_pydot.pydot_layout(Z, prog='dot')\n nx.draw_networkx(Z, pos=Zpos)\n\n else:\n\n #pydot_graph = pydot.Dot('Network', graph_type='digraph')\n pydot_graph = pydot.Dot('Network', graph_type='digraph', rankdir='LR')\n\n pydot_node_dict = dict([\n (node['name'], pydot.Node(**node))\n for node in node_dict.values()\n ])\n for pydot_node in pydot_node_dict.values():\n pydot_graph.add_node(pydot_node)\n\n for edge in edge_list:\n pydot_graph.add_edge(\n pydot.Edge(pydot_node_dict[edge[0]], pydot_node_dict[edge[1]]))\n return pydot_graph", "def _get_full_graph(self):", "def _intermediary_to_dot(tables, relationships):\n t = '\\n'.join(t.to_dot() for t in tables)\n r = '\\n'.join(r.to_dot() for r in relationships)\n return '{}\\n{}\\n{}\\n}}'.format(GRAPH_BEGINNING, t, r)", "def __str__(self):\n s = ''\n for vertex in self.vertices:\n s += vertex.__str__()\n s += \"\\n\"\n return s", "def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")" ]
[ "0.6938999", "0.6668665", "0.66680014", "0.6605425", "0.65282595", "0.65282595", "0.64935935", "0.64716935", "0.64622414", "0.6457543", "0.640081", "0.635242", "0.6327601", "0.6325863", "0.6287123", "0.6281614", "0.6276902", "0.62522554", "0.62207454", "0.6217396", "0.61819017", "0.61813897", "0.61744577", "0.6167146", "0.6167126", "0.6167126", "0.6087866", "0.608542", "0.6078608", "0.6069375", "0.6067209", "0.6033124", "0.60243565", "0.60243565", "0.60218084", "0.5999887", "0.59709615", "0.59709615", "0.59344894", "0.5926517", "0.59176326", "0.58897173", "0.5886209", "0.58773446", "0.58727515", "0.5872456", "0.5870289", "0.5862854", "0.5840744", "0.5838382", "0.5838382", "0.5831607", "0.58084846", "0.5808289", "0.58023995", "0.5800021", "0.57976097", "0.5797104", "0.5788987", "0.57869565", "0.57756627", "0.5770109", "0.5751018", "0.5743079", "0.5724598", "0.5720542", "0.57066596", "0.5704931", "0.570327", "0.5701052", "0.56972086", "0.5692561", "0.5692561", "0.5688388", "0.56859213", "0.5671029", "0.5658294", "0.565784", "0.5656616", "0.5651121", "0.5641797", "0.5629815", "0.5628434", "0.56169033", "0.5613656", "0.5610889", "0.5605", "0.56047726", "0.5603901", "0.5602753", "0.55978125", "0.5597603", "0.55941015", "0.55884784", "0.55846685", "0.55808777", "0.5577582", "0.55753845", "0.5573318", "0.556759" ]
0.6186007
20
return a set of nodes who are within max_dist of self
def neighbors(self, max_dist=3): # TODO: this may have problems because the set doesn't # compare object id but uses user defined comparison methods # TODO: outgoing edges are no longer saved found = set() found.add(self) queue = [(self, 0)] while queue: node, d = queue.pop(0) if d < max_dist: for edge in node.outgoing: if edge.head not in found: found.add(edge.head) queue.append((edge.head, d+1)) for edge in node.incoming: for tailnode in edge.tail: if tailnode not in found: found.add(tailnode) queue.append((tailnode, d+1)) return found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def getMaximumDistances(self):\n pass", "def find(self, value, max_distance):\n\t\t# type: (Any, int) -> List[Tuple[int, Any]]\n\n\t\tnode = self.root\n\t\tret = [] # type: List[Tuple[int, Any]]\n\n\t\tif node is None:\n\t\t\treturn ret\n\n\t\tcandidates = [node] # is a deque better here?\n\n\t\twhile candidates:\n\t\t\tcandidate = candidates.pop()\n\t\t\tdistance = self.distance_func(value, candidate.value)\n\n\t\t\tif distance <= max_distance:\n\t\t\t\tret.append((distance, candidate.value))\n\n\t\t\t# instead of looking for candidates by searching,\n\t\t\t# one could also directly access the necessary keys in the dict\n\t\t\tfor d, bknode in candidate.leaves.items():\n\t\t\t\tlower = distance - max_distance\n\t\t\t\tupper = distance + max_distance\n\t\t\t\tif lower <= d <= upper:\n\t\t\t\t\tcandidates.append(bknode)\n\n\t\treturn ret", "def getNeighbors(self):\n targets = set()\n for arc in self._arcsFrom:\n targets.add(arc.getFinish())\n return [ node for node in sorted(targets) ]", "def greedy_max_cut(graph):\n cut = Cut(set(), set())\n for vertex in graph.nodes:\n l_neighbors = sum((adj in cut.left) for adj in graph.neighbors(vertex))\n r_neighbors = sum((adj in cut.right) for adj in graph.neighbors(vertex))\n if l_neighbors < r_neighbors:\n cut.left.add(vertex)\n else:\n cut.right.add(vertex)\n return cut", "def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]", "def find_max(self):\n return max(self.nodes, key=int)", "def get_min_max_electrode_distances(self):\n distances = pdist(self.get_electrode_positions())\n return distances.min(), distances.max()", "def get_neighbours(self):\n return []", "def eligible_edges(self):\n return self.edges", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def max_cliques(self):\n possible = frozenset(self.vertices())\n acc = frozenset()\n excluded = frozenset()\n 
cliques = []\n degeneracy_ordered_vertices = self.degeneracy_ordering()\n for v in degeneracy_ordered_vertices:\n neighbors_of_v = self.neighbors(v)\n self._bron_kerbosch(\n acc.union({v}),\n possible.intersection(neighbors_of_v),\n excluded.intersection(neighbors_of_v),\n cliques)\n possible = possible.difference({v})\n excluded = excluded.union({v})\n return cliques", "def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes", "def get_interest_nodes(self):\n # go through each node in the network to find the min and max degrees\n max_value = 0\n min_value = len(self.nodes)\n for name in self.nodes:\n\n # check for new max\n if self.nodes[name].get_degree() >= max_value:\n\n max_value = self.nodes[name].get_degree()\n\n self.max_node = name\n\n # check for new min\n elif self.nodes[name].get_degree() <= min_value:\n\n min_value = self.nodes[name].get_degree()\n\n self.min_node = name\n\n return self.max_node, self.min_node", "def _get_traversable_nodes(self):\n nodes = self.local_environment.get_node_neighborhood(self.location)\n potential_nodes = [node for node in nodes if self.local_environment.get_node_deadline(node) >= 0]\n edges_to_potential_nodes = self.local_environment.graph.edges(self.location)\n\n for single_edge in edges_to_potential_nodes:\n\n # if edge is blocked\n if self.local_environment.graph[single_edge[0]][single_edge[1]][\"blocked\"]:\n potential_nodes.remove(single_edge[1])\n return potential_nodes", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts", "def maximumDistance(self):\n from ete2 import Tree\n t = Tree(name='LUCA_root')\n empty_forest = {'sp':t,'gns':t,'fam':t,'ord':t,'cls':t,'phy':t,'kng':t}\n return self.distanceToTree(empty_forest,update_inner_attributes=False)", "def search_coord_with_max_nanobots(self, mins, maxs, fully_in_range, maybe_partially_in_range, best_count=0):\n # Figure out how many of maybe_partially_in_range are actually in range of this whole cube\n # or if they're completely out of range\n cube = OctreeNode(mins, maxs)\n new_fully_in_range = fully_in_range.copy()\n new_partially_in_range = []\n for nanobot in 
maybe_partially_in_range:\n if cube.nanobot_in_range_of_whole_node(nanobot):\n new_fully_in_range.append(nanobot)\n elif cube.in_node(nanobot.coord) or cube.in_range_if_outside(nanobot):\n new_partially_in_range.append(nanobot)\n\n # If we're not potentially at least as good as best_count, no results worth returning\n if len(new_fully_in_range) + len(new_partially_in_range) < best_count:\n return []\n\n # If none are partially in range, we know the answer for this node!\n if not new_partially_in_range:\n return [SearchResult(mins, maxs, len(new_fully_in_range))]\n\n # If this node is only 0 or 1 units long in each direction, we can't subdivide\n big_enough = False\n for axis in range(3):\n if maxs[axis] - mins[axis] > 1:\n big_enough = True\n\n all_results = []\n if not big_enough:\n # Manually test all 8 corners (ignoring duplicate corners, if any)\n points_tested = set()\n for corner in itertools.product(*zip(mins, maxs)):\n if corner not in points_tested:\n points_tested.add(corner)\n new_best_count = len(new_fully_in_range) + len([nanobot for nanobot in new_partially_in_range\n if manhattan_dist(nanobot.coord, corner) <= nanobot.r])\n if new_best_count >= best_count:\n best_count = new_best_count\n all_results += [SearchResult(corner, corner, new_best_count)]\n \n else:\n # Otherwise, divide into 8 subcubes and recursively search\n midpoint = []\n for axis in range(3):\n midpoint.append((mins[axis] + maxs[axis]) // 2)\n\n axis_coords = list(zip(mins, midpoint, maxs))\n for corner_index in itertools.product(*zip([0, 0, 0], [1, 1, 1])):\n subcube_mins = []\n subcube_maxs = []\n for axis in range(3):\n subcube_mins.append(axis_coords[axis][corner_index[axis]])\n subcube_maxs.append(axis_coords[axis][corner_index[axis] + 1])\n\n results = self.search_coord_with_max_nanobots(subcube_mins,\n subcube_maxs,\n new_fully_in_range,\n new_partially_in_range,\n best_count)\n \n # Result counts should all be the same\n if results and results[0].count >= best_count:\n all_results += results\n\n # Keep the result(s) with the highest count\n return [result for result in all_results if result.count == best_count]", "def selection(self):\n bestScore = -10000000.0\n bestChildren = None\n\n for child in self.childNodes:\n score = child.wins / child.visits + np.sqrt(2) * np.sqrt(\n np.log(self.visits) / child.visits)\n if score > bestScore:\n bestChildren = child\n bestScore = score\n return bestChildren", "def neighbours(self):\n return [x.node for x in self.edges]", "def neighbours(self):\n seen = set()\n return [l.other(self) for l in self.dovetails \\\n if id(l) not in seen and not seen.add(id(l))]", "def getNeighbors(self, current: MstarNode):\n neighbors = []\n options = []\n # Loop over all the agents\n for i in range(self.n_agents):\n node: Node = current.nodes[i]\n options_i = []\n if i in current.collision_set:\n # If the agent in the collision set we add the current node as well as all possible nodes\n options_i.append(node)\n (x, y) = node.position\n moves = {0: (x, y - 1), 90: (x + 1, y), 180: (x, y + 1), 270: (x - 1, y)}\n options_i.append(Node(node.position, node, node.rotation + 90, node.h))\n options_i.append(Node(node.position, node, node.rotation - 90, node.h))\n if self.grid[moves[node.rotation][1]][moves[node.rotation][0]] == 0:\n options_i.append(Node(moves[node.rotation], node, node.rotation,\n self.heuristic(i, moves[node.rotation], node.rotation)))\n else:\n # If the agent is not in the collision set we add only the optimal following node\n try:\n if (node, self.goal.nodes[i]) in 
self.policy:\n nextPos = self.policy[(node, self.goal.nodes[i])]\n else:\n nextPos = Astar(self.grid, node, self.goal.nodes[i]).solve()\n self.policy[(node, self.goal.nodes[i])] = nextPos\n except ValueError:\n print(f\"start: {node}, goal: {self.goal.nodes[i]}\")\n raise RuntimeError()\n options_i.append(Node(nextPos[0], node, nextPos[1], self.heuristic(i, nextPos[0], nextPos[1])))\n options.append(options_i)\n # Take the cartesian product to get all options\n for element in itertools.product(*options):\n neighbors.append(list(element))\n return neighbors", "def get_reachable_nodes(self, source: Node) -> Set[Node]:\n nodes_found: Set[Node] = {source}\n queue = [source]\n while queue:\n v = queue.pop()\n for e in v.outgoing_edges:\n if e.node_to not in nodes_found:\n nodes_found.add(e.node_to)\n queue.append(e.node_to)\n return nodes_found", "def get_neighbours(kmer, max_d):\n assert (is_dna(kmer))\n result = set([kmer])\n for i in range(max_d):\n addded = set()\n for kmer in result:\n addded |= _get_neighbours(kmer)\n result |= addded\n return result", "def find_near_nodes(self, new_node):\n number_nodes = len(self.node_list) + 1\n r = self.connect_circle_dist * math.sqrt(\n (math.log(number_nodes) / number_nodes)\n )\n\n # if expand_dist exists, search vertices in a range no more than expand_dist\n if hasattr(self, \"expand_dis\"):\n r = min(r, self.expand_dis)\n dist_list = [\n (node.x - new_node.x) ** 2 + (node.y - new_node.y) ** 2\n for node in self.node_list\n ]\n near_indexes = [dist_list.index(i) for i in dist_list if i <= r ** 2]\n return near_indexes", "def greedy(self):\n # for each node, find the incoming link with the highest score.\n max_scores = {}\n max_sources = {}\n for source, target in self.iteredges():\n score = self.get_score(source, target)\n max_score = max_scores.get(target)\n if max_score is None or score > max_score:\n max_scores[target] = score\n max_sources[target] = source\n # then build a graph out of just these links.\n succs = dict((n, []) for n in self)\n for target, source in max_sources.items():\n succs[source].append(target)\n return Digraph(succs, self.get_score, self.get_label)", "def one_dimension_val_clutering(vals, max_distance=5):\n vals = sorted(vals)\n clusters = []\n for (idx, i) in enumerate(vals):\n cluster = [j for j in vals if abs(j - i) < max_distance]\n clusters.append(cluster)\n clusters = sorted(clusters, key=len, reverse=True)\n cluster = clusters[0]\n if len(cluster) / len(vals) > 0.6 or len(cluster) >= 3:\n return cluster\n else:\n return []", "def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R", "def _max_cardinality_search(self, mask):\n n = mask.shape[0]\n cliques = [[]] # maintains the list of cliques\n last_mark = -1 # number of marked neighbors for prev. 
node\n marks = [[] for i in range(n)] # a set tracking the marked neighbors of each node\n mark_size = np.zeros(n) # number of marked neighbors for each node\n remaining = list(range(n))\n for _ in reversed(range(n)):\n node = remaining[np.argmax(mark_size[remaining])]\n if mark_size[node] <= last_mark: # moving into a new clique\n cliques.append(marks[node] + [node])\n else: # add it to the last clique\n cliques[-1].append(node)\n nb_node = np.nonzero(mask[node,:])[0] # neighbors of node\n for nb in nb_node: # update the marks for neighbors\n marks[nb].append(node)\n mark_size[nb] += 1\n last_mark = mark_size[node]\n remaining.remove(node)\n sorted_cliques = [sorted(c) for c in cliques]\n return sorted_cliques", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def collect_nodes(self):\n free_nodes = Node.query.filter_by(project_id=None).all()\n return free_nodes", "def getCloseSubset(self, arg, dist_arg):\n\n curr = self.root\n while not isinstance(curr,VPTreeLeaf):\n d = dist_arg(curr.pk, arg)\n if d < curr.median_dist:\n curr = curr.left_child\n else:\n curr = curr.right_child\n\n return curr.pk_list", "def get_neighbours(self):\n return self.neighbours", "def nodes_with_m_nbrs(G,m):\n nodes = set()\n \n # Iterate over all nodes in G\n for n in G.nodes():\n \n # Check if the number of neighbors of n matches m\n if len(G.neighbors(n)) == m:\n \n # Add the node n to the set\n nodes.add(n)\n \n # Return the nodes with m neighbors\n return nodes", "def max_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max_nodes\")", "def maxima(self, nodeids: Optional[List[str]] = None) -> List[str]:\n\n if nodeids is None:\n nodeids = list(self.graph.nodes)\n\n return [nid for nid in nodeids\n if all(e[0] == nid\n for e in self.graph.edges\n if e[0] in nodeids\n if e[1] in nodeids\n if nid in e)]", "def nodes_with_m_nbrs(G, m):\n nodes = set()\n\n # Iterate over all nodes in G\n for n in G.nodes():\n\n # Check if the number of neighbors of n matches m\n if len(G.neighbors(n)) == m:\n\n # Add the node n to the set\n nodes.add(n)\n\n # Return the nodes with m neighbors\n return nodes", "def get_known(self):\n pool = set()\n\n # Add neighbours:\n pool.update(self.neighbours)\n\n # Add fingers:\n for f in SUCC_FINGERS:\n pool.update(self.best_finger_succ[f])\n for f in PRED_FINGERS:\n pool.update(self.best_finger_pred[f])\n return list(pool)", "def neighbors(node, test_set):\r\n result = set()\r\n for neighbor in node.neighbors:\r\n if neighbor in test_set:\r\n result.add(neighbor)\r\n return result", "def find_closest_patients(self,max_dist):\n\n centre_loc = self.location\n pats = User.objects.filter(assigned_centre_id__lt=1)\n\n plocs = np.zeros((len(pats), 2), dtype=np.float32)\n pids = np.zeros(len(pats), dtype=int)\n for idx,pat in enumerate(pats):\n plocs[idx,:] = pat.location\n pids[idx] = (pat.id)\n\n dists = np.linalg.norm(centre_loc - plocs, ord=2, axis=-1)\n in_range = (dists <= max_dist)\n\n return pids[in_range].tolist()", "def get_neighbors(self, end_distance, start_distance=0):\n return self.grid.get_neighbors(self, end_distance=end_distance, start_distance=start_distance)", "def _filter_max_dist_in_element(self, simplices):\n if self.max_dist_in_element is None:\n return simplices\n\n filtered = []\n for tup in simplices:\n dists = []\n for root in tup:\n new_dist = self.cluster.get_distances(root, tup)\n dists += 
list(new_dist)\n\n if max(dists) < self.max_dist_in_element:\n filtered.append(tup)\n return filtered", "def near(self, pose):\n # type: (Pose) -> Set[Pose]\n return [p for p in self.nodes if self.dist(p, pose) <= self.r_near]", "def rangeQuery(self, x):\n \n neighbors = []\n \n for y in range(len(self.df)):\n q = self.df[y, :2]\n if self.dist(x, q) <= self.epsilon:\n neighbors.append(y)\n \n return neighbors", "def neighbours_L(self):\n seen = set()\n return [l.other(self) for l in self.dovetails_L \\\n if id(l) not in seen and not seen.add(id(l))]", "def neighbors(self) -> List['Node']:\r\n self._load_neighbors()\r\n return [edge.source if edge.source != self else edge.target\r\n for edge in self._neighbors.values()]", "def _find_neighbours(self):\n\n neighbours = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_0 - p, axis=1) <= self.R_n)[0]\n # delete self index\n index = np.argwhere(nearests==i)\n nearests = np.delete(nearests, index)\n neighbours.append(nearests)\n\n return neighbours", "def max_edges(self):\n return self._max_edges", "def get_neighbours(self):\n return self._neighbours", "def neighbours_R(self):\n seen = set()\n return [l.other(self) for l in self.dovetails_R \\\n if id(l) not in seen and not seen.add(id(l))]", "def eligible_edges(self):\n if len(self.edges) == 4:\n return [self.edges[0], self.edges[2]]\n return []", "def branches(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts >= 3 ]", "def uct_select_child(self):\n s = sorted(self.child_nodes, key=lambda c: c.Q + sqrt(\n 2 * log(self.visits) / c.visits))[-1]\n return s", "def get_coreset(self, pool_features, labelled_features):\n new_batch = []\n self.update_distances(pool_features, labelled_features, reset_dist=True)\n for _ in range(self.num_query):\n # choose furthest point\n ind = np.argmax(self.min_distances)\n # New examples should not be in already selected since those points\n # should have min_distance of zero to a cluster center.\n assert ind not in new_batch\n # update distances with this point\n self.update_distances(pool_features, pool_features[ind, :].reshape(1,-1), reset_dist=False)\n new_batch.append(ind)\n print(f\"Maximum distance from cluster centers is {max(self.min_distances)}\")\n\n return new_batch", "def get_leaf_coordinate_points(self, max_dimensions=None):\n names, dist_mat = self.get_distance_matrix()\n num_leaves = len(names)\n sqrd_dist = np.square(dist_mat)\n # Generate the positive semi-definite square matrix M:\n m_mat = np.zeros((num_leaves, num_leaves), dtype='float')\n for i in range(num_leaves):\n di1 = sqrd_dist[0,i]\n for j in range(i, num_leaves):\n m = di1 + sqrd_dist[0,j] - sqrd_dist[i,j]\n m_mat[i,j] = m\n m_mat[j,i] = m\n m_mat /= 2.0\n # An eigenvalue decomposition of M yields the coordinate points:\n values, vectors = np.linalg.eigh(m_mat)\n tokeep = max(len(values) - max_dimensions, 0) if max_dimensions else 0\n values, vectors = values[tokeep:], vectors[:,range(tokeep, len(values))]\n coords = np.column_stack(vectors[:,i]*np.sqrt(val) for i, val in enumerate(values) if val > 1e-5)\n return names, coords", "def get_neighbours(self):\n return self.points_to.keys()", "def getNeighbors(self):\n return self.neighbors", "def neighbors(self, u):\r\n return filter(lambda v: self.getCapacity((u,v)) > 0, self.adjacent[u])", "def helper(reviewer: Any, graph: Graph) -> set:\n reviewers_so_far = set()\n\n for movie in graph.get_neighbours(reviewer):\n for user in 
graph.get_neighbours(movie):\n if graph.get_weight(user, movie) >= 8:\n reviewers_so_far.add(user)\n return reviewers_so_far", "def get_adj_nodes(self):\n return [\n self.nearest_node + PVector(1, 0),\n self.nearest_node + PVector(0, 1),\n self.nearest_node + PVector(-1, 0),\n self.nearest_node + PVector(0, -1)]", "def get_other_neighbors(self, node):\n neighbors = self.get_neighbors()\n return list(set(neighbors) - set([node]))", "def get_nodes(self, latlon=False):\n ids = np.where(np.isnan(self.data[:,:,:]))\n i_nan = ids[0][0] ; j_nan = ids[1][0]\n \n def area_neighbours(Area, i_nan, j_nan):\n rows = np.array(Area)[:,0]\n cols = np.array(Area)[:,1]\n rows_m = rows-1\n cols_m = cols-1\n rows_p = rows+1\n cols_p = cols+1\n \n p1 = np.array([rows_m,cols]).ravel().reshape(len(rows),2,order='F')\n p2 = np.array([rows_p,cols]).ravel().reshape(len(rows),2,order='F')\n p3 = np.array([rows,cols_m]).ravel().reshape(len(rows),2,order='F')\n p4 = np.array([rows,cols_p]).ravel().reshape(len(rows),2,order='F')\n cond1 = p1[:,0]<0\n cond2 = p2[:,0]>self.dimX-1\n cond3 = p3[:,1]<0\n cond4 = p4[:,1]>self.dimY-1\n if latlon:\n p3[:,1][cond3] = self.dimY-1\n p4[:,1][cond4] = 0\n else:\n p3[:,0][cond3] = i_nan\n p3[:,1][cond3] = j_nan\n p4[:,0][cond4] = i_nan\n p4[:,1][cond4] = j_nan\n p1[:,0][cond1] = i_nan\n p1[:,1][cond1] = j_nan\n p2[:,0][cond2] = i_nan\n p2[:,1][cond2] = j_nan\n p = np.concatenate((p1,p2,p3,p4)).tolist()\n return [i for i in p if i not in self.unavail]\n\n def area_max_correlation(Area, neighbours):\n Rmean = [] ; X = []\n for cell in neighbours:\n R = []\n new_cell = cell[0]*self.dimY + cell[1]\n if new_cell in self.gridcells:\n X.append(cell)\n IDm = np.where(self.gridcells==new_cell)\n Rmean.append(np.nanmean(self.corrs[cells_in_k,IDm]))\n try:\n Rmax = np.nanmax(Rmean)\n except ValueError:\n Rmax = np.nan\n return np.array(X), Rmean, Rmax\n \n def diag_indices(a, k):\n rows, cols = np.diag_indices_from(a)\n if k < 0:\n return rows[-k:], cols[:k]\n elif k > 0:\n return rows[:-k], cols[k:]\n else:\n return rows, cols\n\n #S T E P 1 (C R E A T E N O D E S)\n\n self.nodes = {}\n self.unavail = []\n if latlon:\n neighbour_corrs1 = self.corrs.diagonal(offset=1)\n neighbour_corrs2 = self.corrs.diagonal(offset=self.dimY-1)\n subset = np.arange(0,len(neighbour_corrs2),self.dimY)\n neighbour_corrs2 = neighbour_corrs2[subset]\n neighbour_corrs = np.concatenate((neighbour_corrs1,neighbour_corrs2))\n\n cellIDs1 = diag_indices(self.corrs,1)\n cellIDs2 = diag_indices(self.corrs,self.dimY-1)\n\n cellIDs = (np.concatenate((cellIDs1[0],cellIDs2[0][subset])),\\\n np.concatenate((cellIDs1[1],cellIDs2[1][subset])))\n else:\n neighbour_corrs = self.corrs.diagonal(offset=1)\n cellIDs = diag_indices(self.corrs,1)\n \n cellIDs = (self.gridcells[cellIDs[0]],self.gridcells[cellIDs[1]])\n k = 0\n neighbour_corrs,cellIDs1,cellIDs2 = list(zip(*sorted(zip(neighbour_corrs,cellIDs[0],cellIDs[1]),reverse=True)))\n cell_IDs = (cellIDs1,cellIDs2)\n np.random.seed(2)\n for it in range(len(neighbour_corrs)):\n cells_in_k = []\n i = cell_IDs[0][it]\n j = cell_IDs[1][it]\n r = neighbour_corrs[it]\n \n row_i = int(np.floor(i/self.dimY)) ; col_i = int(i % self.dimY)\n row_j = int(np.floor(j/self.dimY)) ; col_j = int(j % self.dimY)\n \n if ([row_i,col_i] not in self.unavail) & ([row_j,col_j] not in self.unavail):\n if r>self.tau:\n self.nodes.setdefault(k, []).append([row_i,col_i])\n self.nodes.setdefault(k, []).append([row_j,col_j])\n self.unavail.append([row_i,col_i])\n self.unavail.append([row_j,col_j])\n 
cells_in_k.extend(np.where(self.gridcells==i)[0])\n cells_in_k.extend(np.where(self.gridcells==j)[0])\n\n while True: #expand\n neighbours = area_neighbours(self.nodes[k], i_nan, j_nan)\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[k], neighbours=neighbours)\n if Rmax > self.tau:\n m = X[Rmean==Rmax].tolist()\n if len(m)>1:\n m = m[np.random.randint(low=0,high=len(m))]\n else:\n m = m[0]\n self.nodes.setdefault(k, []).append(m)\n self.unavail.append(m)\n cells_in_k.extend(np.where(self.gridcells==m[0]*self.dimY+m[1])[0])\n else:\n break\n if len(self.nodes[k]) <= 2:\n del self.nodes[k]\n k += 1\n else:\n break\n \n #S T E P 2 (M E R G E N O D E S)\n \n self.unavail = []\n while True:\n Rs = {}\n unavail_neighbours = {}\n num_cells = dict([(area,len(self.nodes[area])) if self.nodes[area] not in self.unavail else (area,np.inf) for area in self.nodes.keys()])\n maxID = min(num_cells.items(), key=operator.itemgetter(1))[0]\n if num_cells[maxID] > 175: #arbitrary choice?\n break\n else:\n cells_in_k = [np.where(self.gridcells==cell[0]*self.dimY+cell[1])[0] for cell in self.nodes[maxID]]\n neighbours = area_neighbours(self.nodes[maxID], i_nan, j_nan)\n for cell in neighbours:\n gcell = cell[0]*self.dimY + cell[1]\n Rmean = []\n cond1 = gcell in self.gridcells\n cond2 = cell not in self.nodes[maxID]\n cond3 = cell not in [k for k, g in itertools.groupby(sorted(itertools.chain(*unavail_neighbours.values())))]\n cond4 = len([area for area, cells in self.nodes.items() if cell in cells]) > 0\n if (cond1) & (cond2) & (cond3) & (cond4):\n nID = [area for area, cells in self.nodes.items() if cell in cells][0]\n unavail_neighbours[nID] = self.nodes[nID]\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[nID]+self.nodes[maxID], neighbours=self.nodes[nID]+self.nodes[maxID])\n if nID not in Rs: \n Rs[nID] = np.nanmean(Rmean)\n try:\n Rs_maxID = max(Rs.items(), key=operator.itemgetter(1))[0]\n if Rs[Rs_maxID] > self.tau:\n for cell in self.nodes.pop(Rs_maxID, None):\n self.nodes.setdefault(maxID, []).append([cell[0],cell[1]])\n else:\n self.unavail.append(self.nodes[maxID])\n except ValueError:\n self.unavail.append(self.nodes[maxID])", "def neighbors(self):\n return self.graph.neighbors(self.id)", "def max_num_neighbors(self):\n return self._max_num_neighbors", "def select(self):\n best_qsa_star_add = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n qsa_star_add = qsa_star + 0.2 * self.c * sqrt(log(self.visits) / c.visits)\n if qsa_star_add > best_qsa_star_add:\n best_qsa_star_add = qsa_star_add\n best_node = c\n return best_node", "def find_reachable_nodes(self):\n # find all reachable nodes down from the goal\n found = {}\n found[id(self.root)] = self.root\n queue = [self.root]\n #print >>sys.stderr, '---'\n while queue:\n node = queue.pop(0)\n if hasattr(node, 'dead'):\n if node.dead:\n #print >>sys.stderr, 'dead', node\n continue\n assert not node.dead\n for edge in node.incoming:\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n if id(tailnode) not in found:\n found[id(tailnode)] = tailnode\n queue.append(tailnode)\n tailnode.nout = 0\n tailnode.nout += 1\n # save for sanity check\n self.found = found", "def neighbors(self):\n return self._neighbors", "def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n 
while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1", "def get_visited_nodes(self):\n return self.visited_nodes", "def get_neighbours(self, x, k):\n k = min(k, self.n)\n nearest = {}\n for i in range(k):\n nearest[i] = self.euclidean_distance(x, self.train_x[i])\n for i in range(k, self.n):\n dist = self.euclidean_distance(x, self.train_x[i])\n if dist < max(nearest.values()):\n nearest.pop(max(nearest, key=nearest.get))\n nearest[i] = dist\n return nearest", "def get_local_maxes(self, use_full=False, strict=False, x_y=None):\n if x_y is None:\n if use_full:\n x, y = self.x_full, self.y_full\n y_offset = 0\n else:\n x, y = self.x, self.y\n y_offset = self.y_offset\n else:\n x, y = x_y\n y_offset = 0\n\n if strict:\n # take only those greater than both adjacent\n maxes = sps.argrelextrema(y, np.greater)[0]\n else:\n # take all greater/equal to both sides\n maxes = sps.argrelextrema(y, np.greater_equal)[0]\n\n # check that max_y values > 0\n maxes = maxes[y[maxes] > 0]\n\n # filter capped values on both sides\n maxes = maxes[y[maxes] != 5 - y_offset]\n\n max_x = x[maxes]\n max_y = y[maxes]\n\n return max_x, max_y", "def get_reachables(self, x, y):\n\n return [v for v in self.get_neighbors(x, y) if v in self._reachable[(x, y)]]", "def leaf_nodes(self):\n return self.nodes.filter(lft=models.F('rght') - 1)", "def best_upper_bound(self):\n if self._all_nodes:\n return min(node.upper_bound for node in self._all_nodes)\n else:\n return np.inf", "def exhaustive_search(data_set, target):\n\n # Initialize the outputs\n minimum_distance = float(\"inf\")\n nearest_neighbor = None\n\n # Search through the data set for the nearest neighbor\n for point in data_set:\n distance = euclidean_metric(target, point)\n if distance < minimum_distance:\n nearest_neighbor = point\n minimum_distance = distance\n return nearest_neighbor, minimum_distance", "def sort_nodes(self, min_return=5):\n nodes = self._all_nodes()\n sorted_nodes, sorted_scores = self.scorer.sort(nodes)\n\n if len(nodes) <= min_return:\n return sorted_nodes, sorted_scores\n\n seen_hashes = set()\n best_nodes = []\n best_scores = []\n last_score = 1e16\n for score, node in zip(sorted_scores, sorted_nodes):\n if len(best_nodes) >= min_return and score < last_score:\n break\n route_actions, _ = self.search_tree.route_to_node(node)\n route_hash = self._routehash(route_actions)\n\n if route_hash in seen_hashes:\n continue\n seen_hashes.add(route_hash)\n best_nodes.append(node)\n best_scores.append(score)\n last_score = score\n\n return best_nodes, best_scores", "def neighbors_of(self, node: Node) -> Set[Node]:\n return self._neighbors[node].copy()", "def _get_neighbors(self, p, min_x, max_x, min_y, max_y):\n x, y = p\n x0 = min_x if x == min_x else x - 1\n x1 = max_x + 1 if x == max_x else x + 2\n y0 = min_y if y == min_y else y - 1\n y1 = max_y + 1 if y == max_y else y + 2\n \n return [(x, y) for x in xrange(x0, x1) for y in xrange(y0, y1) if (x, y) != p]", "def find_neighbors(self, node, agent):\n neighbors = []\n n_nodes = len(self.all_nodes[agent])\n for node_temp in self.all_nodes[agent]:\n Vd = np.pi ** (self.dim / 2) / func.gamma(self.dim / 2 + 1)\n radius = min((self.gamma / Vd * np.log(n_nodes) / n_nodes) ** (1 / self.dim), self.eta)\n if np.linalg.norm(node_temp.state - node.state) < radius:\n 
neighbors.append(node_temp)\n return neighbors", "def initial_solution(self):\n cur_node = random.choice(self.nodes) # start from a random node\n solution = [cur_node]\n\n free_nodes = set(self.nodes)\n free_nodes.remove(cur_node)\n while free_nodes:\n next_node = min(free_nodes, key=lambda x: self.dist(cur_node, x)) # nearest neighbour\n free_nodes.remove(next_node)\n solution.append(next_node)\n cur_node = next_node\n\n cur_fit = self.fitness(solution)\n if cur_fit < self.best_fitness: # If best found so far, update best fitness\n self.best_fitness = cur_fit\n self.best_solution = solution\n self.fitness_list.append(cur_fit)\n return solution, cur_fit", "def iter_dist(self):\n self.makeTree()\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield coords[i], dists.compress((dists > sd) & ~np.isinf(dists))", "def __find_max_distance(self):\n return utils.find_max_distance(self.__game)", "def clusters(self):\n return np.argmax(self.alpha[:, :, 0], axis=0)", "def _get_neighbors(cls, pattern: str, max_distance: int) -> List[str]:\n return get_neighborhood(pattern, ''.join(cls.nucleobases.keys()), max_distance)", "def get_nodeset(self):\n return set(self.nodeset) # return the nodeset", "def getDistsSourceToNodes(self):\r\n self.run()\r\n return self.dists_so_far", "def trips_distance_constraint_bfs(self, start_node, end_node, max_distance):\n if start_node is None or len(start_node) != 1:\n raise Exception('Invalid start_city: %s' % start_node)\n if end_node is None or len(end_node) != 1:\n raise Exception('Invalid end_city: %s' % end_node)\n\n queue = [(start_node, 0, [])]\n paths = []\n while queue:\n node, distance, traceback = queue.pop(0)\n\n # We do not need to parse anymore as any further city addition will break our max_hops constraint\n if distance >= max_distance:\n continue\n\n path = traceback + [node]\n if len(path) > 2 and path[-1] == end_node:\n paths.append('-'.join(path) + (' (%d distance)' % distance))\n\n for neighbour in self.graph[node].keys():\n queue.append((neighbour, distance + self.graph[node][neighbour], traceback + [node]))\n\n return paths", "def closest_points(self, points, maxdist=None):\n return [self.closest_point(point, maxdist) for point in points]", "def find_result(self):\n result = []\n current_node = self.start_node\n while current_node.children:\n values = []\n for i in current_node.children:\n values += current_node.children[i]\n # find removed cells and then find the direct next move\n removed_cells = max(values)[1]\n for key, value in current_node.children.items():\n for i in value:\n if len(i) == 3 and i[1] == removed_cells:\n current_node = key\n result.insert(0, (current_node, removed_cells))\n break\n if current_node == key:\n break\n return result", "def get_matching(self):\n verts, plaqs, d_verts, d_plaqs = self.get_stabs()\n\n # def get_matching(anyons, d_anyons):\n # edges = self.get_edges(anyons)\n # for i0, i1, weight in edges:\n # nxgraph.add_edge(i0, i1, weight=-weight)\n # output = nx.algorithms.matching.max_weight_matching(nxgraph, maxcardinality=True)\n # return [[d_anyons[i0], d_anyons[i1]] for i0, i1 in output]\n\n def get_matching(anyons, d_anyons):\n output = pm.getMatching(len(anyons), self.get_edges(anyons))\n return [[d_anyons[i0], d_anyons[i1], anyons[i0], anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]\n\n self.matching = []\n if verts:\n self.matching += get_matching(verts, d_verts)\n if plaqs:\n 
self.matching += get_matching(plaqs, d_plaqs)", "def get_neighbourhood(self, radius: int = 1) -> set:\n if radius == 0:\n return set()\n result = self.neighbours.copy()\n if radius > 1:\n # Recursively get neighbours of neighbours.\n for neighbour in self.neighbours:\n result |= neighbour.get_neighbourhood(radius - 1)\n return result - {self}", "def findmaxnode(self):\n if not self._rightchild:\n return self\n return self._rightchild.findmaxnode()", "def subtree_distances(self, root):\r\n\r\n nodes = root.get_terminals()\r\n nodes.reverse()\r\n node_pairs = itertools.ifilter(\r\n lambda (a1, a2): a1.name < a2.name,\r\n itertools.product(nodes, nodes))\r\n\r\n distances = [self._node_distance(pair[0], pair[1])\r\n for pair in node_pairs]\r\n\r\n return distances", "def coords_reachable(self, start, distance): # TODO: Accept a lambda that\n # determines blocked or not\n visited = set() # set of hexes\n visited.add(start)\n fringes = list() # array of arrays of hexes\n fringes.append([start])\n\n for idx in range(1, distance+1):\n fringes.append([])\n for coord in fringes[idx-1]:\n for direction in self.dirs:\n neighbor = coord+direction\n if neighbor not in visited: # TODO: add exemptions (impassable)\n #or mandatory neighbors (direct\n #connections)\n visited.add(neighbor)\n fringes[idx].append(neighbor)\n\n return visited", "def get_optimal_patches(self):\n self.optimal_patch_centers = list()\n # Backtrace through cost to determine optimal samples\n for i in range(self.cost_matrix.shape[0] - 1, -1, -1):\n idx = self.nodes_min_energy_index(i)\n node = self.min_energy_index[i][idx]\n self.optimal_patch_centers.append(node)\n self.optimal_patch_centers.reverse()\n self.optimal_patch_centers = [\n int(patch) for patch in self.optimal_patch_centers if np.isfinite(patch)\n ]\n optimal_patch_centers = list()\n for patch_center in self.optimal_patch_centers:\n if (\n self.source_patches[self.patch_centers[patch_center]].size\n != self.patch_size * self.patch_size * 3\n ):\n node = patch_center - 1 if patch_center > 1 else patch_center + 1\n optimal_patch_centers.append(node)\n if optimal_patch_centers:\n self.optimal_patch_centers = optimal_patch_centers", "def test_node_weight_range_max(self):\n n = Node(inputs=3)\n for i in n.weights:\n self.assertLess(i, 0.1)" ]
[ "0.6185975", "0.61843127", "0.6099087", "0.60814947", "0.6033172", "0.60061944", "0.5881994", "0.58049726", "0.5757084", "0.57157856", "0.57139254", "0.5710196", "0.57095784", "0.5681697", "0.5668638", "0.56296057", "0.56086", "0.5608161", "0.5601407", "0.5596248", "0.55825", "0.55809957", "0.5579248", "0.55698985", "0.5566113", "0.55277824", "0.55240047", "0.5519509", "0.551765", "0.54933506", "0.54914516", "0.54914516", "0.54914516", "0.54914516", "0.54849225", "0.5481818", "0.54761696", "0.5474141", "0.54679805", "0.5449768", "0.54489434", "0.54480195", "0.54373395", "0.54359853", "0.5432657", "0.54229057", "0.54033875", "0.5403142", "0.54018784", "0.53894997", "0.5386638", "0.53848773", "0.5376375", "0.5364534", "0.53543204", "0.53448486", "0.5343715", "0.53377485", "0.53207636", "0.532009", "0.531389", "0.5310235", "0.53083986", "0.52995193", "0.5297083", "0.5293995", "0.5272721", "0.52315474", "0.52237916", "0.52206403", "0.5219226", "0.52143323", "0.5211694", "0.5209107", "0.52059615", "0.51995283", "0.5198632", "0.51974785", "0.5190165", "0.5180995", "0.518024", "0.5175352", "0.5163183", "0.5152844", "0.5139275", "0.5125474", "0.51229364", "0.5120401", "0.510442", "0.51007956", "0.5098558", "0.5097776", "0.5087401", "0.5086799", "0.5086783", "0.5081257", "0.5079828", "0.5079647", "0.5079551", "0.5076265" ]
0.7041858
0
show the neighborhood of this node in a picture
def show_neighborhood(self, max_dist=3, detailed=True): dotstr = '' for node in self.neighbors(max_dist): if node is self: dotstr += node.dot(color='dodgerblue', detailed=detailed) else: dotstr += node.dot(detailed=detailed) dotstr = 'digraph hypergraph {\nrankdir=BT\n%s}\n' % dotstr f = open('/tmp/dotty', 'w') f.write(dotstr) f.close() os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif') os.system('eog /tmp/dotty.gif')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def show_neighbours(self):\n if self.connected_to:\n s = \"\"\n for connection in self.connected_to:\n s += f\"{connection.get_name()} \"\n return s\n return \"No neighbours\"", "def __repr__(self):\n s = self.regular_neighborhood()\n return 'Train track on the ' + repr(s).lower()", "def test_d2_get_neighborhood_small(self):\n config.NR_COLS = 3\n config.NR_ROWS = 3\n gamefield = [\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 1],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 3)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 2)\n self.assertEqual(nh, 4)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 2, 0)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 2, 2)\n self.assertEqual(nh, 3)\n # center\n nh = logic.get_neighborhood(gamefield, 1, 1)\n self.assertEqual(nh, 4)", "def create_graph(self):\n robot_pix = int(math.ceil(self.robot.size / self.resolution))\n ii = 0\n jj = 0\n for i in range(0, self.height, robot_pix):\n jj = 0\n for j in range(0, self.width, robot_pix):\n block = self.occ_grid[i:i+robot_pix, j:j+robot_pix].flatten()\n avg = np.mean(block)\n robot_block = self.tesselation_image[i:i+robot_pix, j:j+robot_pix].flatten()\n n_occur = np.bincount(robot_block)\n block_id = np.argmax(n_occur)\n \n p = Pose()\n p.position.x = self.resolution * j + self.resolution / 2.0 + self.origin.position.x\n p.position.y = self.height * self.resolution - (self.resolution * i + self.resolution / 2.0) + self.origin.position.y\n node = Node(ii, jj, p)\n idx = np.where(block > 20)\n if block_id == self.robot.robot_id:\n if 0 <= avg <= 20:\n print(\"Node in path\", node)\n node.valid = True\n else:\n node.valid = False\n elif block_id == 0:\n node.valid = False\n else:\n node.belongs = False\n self.nodes[ii,jj] = node\n jj += 1\n ii += 1\n\n\n height, width = self.nodes.shape\n print(\"Node shape: \", self.nodes.shape)\n for i in range(height):\n for j in range(width):\n min_i = max(0, i-1)\n max_i = min(height - 1, i+1) + 1\n min_j = max(0, j-1)\n max_j = min(width - 1, j+1) + 1\n\n node = self.nodes[i,j]\n neighbors = self.nodes[min_i:max_i, min_j:max_j].flatten()\n for n in neighbors:\n if not n or not node:\n print(\"None %d-%d\"%(i,j))\n continue\n if n != node:\n if n.valid:\n print(\"Neighbor appended\")\n self.nodes[i,j].neighbors.append(n)\n else:\n self.nodes[i,j].obstacle_neighbors.append(n)\n print(\"Graph is created!\")", "def test_d1_get_neighborhood(self):\n config.NR_COLS = 10\n config.NR_ROWS = 10\n gamefield = [\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 4)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 8)\n self.assertEqual(nh, 2)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 9, 1)\n self.assertEqual(nh, 4)\n # bottom 
right\n nh = logic.get_neighborhood(gamefield, 9, 9)\n self.assertEqual(nh, 4)\n # center\n nh = logic.get_neighborhood(gamefield, 4, 5)\n self.assertEqual(nh, 3)", "def neighbourhood(self, node1, node2, t):\n raise NotImplementedError", "def draw_neighbor_counts(img_bgr, rafts_loc, num_of_rafts):\n points = rafts_loc\n vor = ScipyVoronoi(points)\n neighbor_counts = np.zeros(num_of_rafts, dtype=int)\n for raft_id in range(num_of_rafts):\n neighbor_counts[raft_id] = np.count_nonzero(vor.ridge_points.ravel() == raft_id)\n\n font_face = cv.FONT_ITALIC\n font_scale = 0.5\n font_color = (0, 165, 255) # BGR\n font_thickness = 1\n output_img = img_bgr\n for raft_id in np.arange(num_of_rafts):\n text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)\n output_img = cv.putText(output_img, str(neighbor_counts[raft_id]),\n (rafts_loc[raft_id, 0] + text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1]),\n font_face, font_scale, font_color, font_thickness, cv.LINE_AA)\n\n return output_img", "def show_nn(X):\n neigh = NearestNeighbors(n_neighbors=2)\n nbrs = neigh.fit(X)\n distances, indices = nbrs.kneighbors(X)\n distances = np.sort(distances, axis=0)\n distances = distances[:,1]\n plt.plot(distances)", "def addNeighbor(self, neighbor):", "def placeNodes(imgR):\n nodes = []\n N,M = np.shape(imgR)\n for i in range(N):\n for j in range(M):\n loc = (i,j)\n if imgR[loc] == 0.:\n if len(adjPaths(imgR,loc)) > 2:\n nodes.append(loc)\n return nodes", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def show_image(self, idx):\n image, target = self.__getitem__(self, idx)\n im_h, im_w, _ = image.size()\n labels_num = target['labels']\n rescale = torch.tensor([[im_w, im_h, im_w, im_h]])\n 
bboxs = target['boxes'] * rescale\n img = image.permute(1, 2, 0).numpy()\n for i, bboxe in enumerate(bboxs):\n x, y, xm, ym = bboxe\n label = class_name[int(labels_num[i])]\n plot_one_box((int(x), int(y), int(xm), int(ym)), img, label=label, line_thickness=3)\n cv2.imshow('image', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def print_neighbours(self, word=''):\n\n if word in self.index.keys():\n word_ind = self.index[word]\n for i in self.graph[word_ind]:\n print(self.words[i])\n print()\n else:\n print('Error - Not a valid word')", "def neighbors(self, x):\n pass", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def getneighbors(self):\r\n\t\ti=self.cell[0]\r\n\t\tj=self.cell[1]\r\n\t\t\r\n\t\tw = self.width-1\r\n\t\tCenter = self.base[i][j]\r\n\t\tif(self.type==\"Neumann\"):\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tself.surrounding = [North,South,East,West]\r\n\t\t\tself.binary= 
str(East)+str(West)+str(South)+str(North)+str(Center)\r\n\t\t\t\r\n\t\telif(self.type==\"Moore\"):\r\n\t\t\t\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\t\t\t\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][0]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][w]\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][0]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tself.surrounding = [North,South,East,West,NE,NW,SE,SW]\r\n\t\t\tself.binary= str(East)+str(West)+str(South)+str(North)+str(Center)+str(NE)+str(NW)+str(SE)+str(SW)", "def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)", "def show_one(img):\n dpi = 40\n margin = 0.05\n nda = sitk.GetArrayFromImage(img)\n spacing = img.GetSpacing()\n extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0)\n figsize = (5, 5)\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = 
fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])\n\n plt.set_cmap(\"gray\")\n ax.imshow(nda, extent=extent, interpolation=None)", "def display(self): \n print ' ' \n print 'Connect ', NWIN, ' Board '\n print ' ' \n for r in reversed(range(self.getHeight())):\n for c in range(self.getWidth()):\n if self.cell[c][r] == BLACK:\n print '+',\n elif self.cell[c][r] == WHITE:\n print '-',\n else:\n print '.',\n print ' '\n for c in range(self.getWidth()):\n print c,\n print ' '\n print ' '", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def get_neighbours(self):\n return []", "def neighbor_node(self, node):\n neighborhood_node = []\n for i in range(self.nodenum):\n if(self.Adjmatrix[node, i] == 1):\n neighborhood_node.append(i)\n \n return neighborhood_node", "def draw(self):\n nx.draw_networkx(self.rc)", "def find_neighbors(self):\n x, y = self.position\n\n for i in range(3):\n for j in range(3):\n try:\n self.neighbors.append(self.stitches[(x - 1 + i, y - 1 + j)].position)\n except:\n pass\n\n # this cell will be added by default so we must delete at the end\n self.neighbors.remove(self.position)", "def draw_nodes(self):\n pass", "def plot_networks(student, shape):\n plt.figure()\n s = np.arange(np.prod(shape))\n plt.figure()\n value = student.train_model.value(s).reshape(shape)\n plt.imshow(value)\n\n pi = student.train_model.proba_step(s).T.reshape((-1,) + shape)\n x, y = np.unravel_index(s, shape)\n\n for a in range(pi.shape[0]):\n if a == UP:\n u = np.zeros_like(s)\n v = pi[a].T.ravel()\n if a == DOWN:\n u = np.zeros_like(s)\n v = -pi[a].T.ravel()\n if a == RIGHT:\n v = np.zeros_like(s)\n u = pi[a].T.ravel()\n if a == LEFT:\n v = np.zeros_like(s)\n u = -pi[a].T.ravel()\n plt.quiver(x, y, u, v)", "def _neighbours_html(self):\n self._make_svg_script()\n\n ret = {\n 'rt_label': self.rt_label,\n 'uri': self.uri,\n 'uri_encoded': self.uri_encoded,\n 'label': self.label,\n 'nid': self.nid,\n 'gat': self.gat,\n 'rs_encoded': self.rs_encoded,\n 'rs_label': self.rs_label,\n 'sa': self.sa,\n 'ea': self.ea,\n 'script': self.script\n }\n\n return render_template(\n 'class_report.html',\n report=ret\n )", "def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")", "def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def draw_house_walls(x, y, width, height):\n print('Типа рисую стены...', x, y, width, height)", "def paint_a_picture():\n # Make a training set (many random i,j coord and an x by y box around that coord to start with)\n # Throw it into the net\n # Test how it does for some random coordinate inputs\n pass", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def neighborhood((y, x), (height, width)):\n return [(yt, xt) for xt in [x + 1, x, x - 1]\n for yt in [y + 1, y, y - 1]\n if 0 
<= xt < width and 0 <= yt < height\n and (xt, yt) != (x, y)]", "def display_edges(image, g, threshold):\n\timage = image.copy()\n\t# for edge in g.edges_iter():\n\tfor edge in g.edges:\n\t\tn1, n2 = edge\n\n\t\tr1, c1 = map(int, rag.nodes[n1]['centroid'])\n\t\tr2, c2 = map(int, rag.nodes[n2]['centroid'])\n\n\t\tline = draw.line(r1, c1, r2, c2)\n\t\tcircle = draw.circle(r1, c1, 2)\n\n\t\tif g[n1][n2]['weight'] < threshold:\n\t\t\timage[line] = 0, 1, 0\n\t\timage[circle] = 1, 1, 0\n\n\treturn image", "def show_map_window(image):\n cv2.imshow(_WINDOW_NAME, image)", "def show_env(self, img):\n plt.figure(1)\n plt.subplot(111)\n plt.imshow(img, interpolation=\"nearest\")\n plt.show()", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def draw_occupied_cells(self):\n reds = [cell for cell in self.game.get_cells() if cell.player == 1]\n blacks = 
[cell for cell in self.game.get_cells() if cell.player == 2]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=reds,\n edgecolors='black', node_color='red', linewidths=2)\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=blacks,\n edgecolors='black', node_color='black', linewidths=2)", "def draw_open_cells(self):\n empty_cells = [cell for cell in self.game.get_cells() if cell.player == 0]\n nx.draw_networkx_nodes(self.G, pos=self.positions, nodelist=empty_cells,\n edgecolors='black', node_color='white', linewidths=2)", "def neighbors(node, topology):\n return [n for n in topology[node]]", "def cell_edges(self):", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def neighboring_cells(self, cell_id, include_self=False):\n\t\tx, y, t = self._xyt_from_cell_id(cell_id)\n\n\t\tncells = bhpix.neighbors(x, y, self.level, include_self)\n\t\tfor (cx, cy) in ncells:\n\t\t\tif fabs(fabs(cx) - fabs(cy)) > 0.5:\n\t\t\t\tprint \"PROBLEM: \", x, y, cx, cy\n\t\t\t\tprint ncells\n\n\t\tnhood = [ self._cell_id_for_xyt(x, y, t) for (x, y) in ncells ]\n\n\t\t# TODO: Remove once we're confident it works\n\t\trrr = set([ self._xyt_from_cell_id(cid)[:2] for cid in nhood ])\n\t\tassert rrr == ncells\n\t\tassert cell_id not in rrr\n\n\t\treturn nhood", "def see_image(self, idx, show=True):\n true_label = self.true_targets[idx]\n img, label, _ = self.__getitem__(idx) # img has channel as 1st dim\n img = np.transpose(img.numpy(), (1, 2, 0)) # channel as last dim\n if show:\n plt.imshow(img)\n plt.title(f\"Label: {self.classes_labels[true_label]}\")\n plt.show()\n else:\n return img, label, true_label", "def drawNetwork(self, screen):\r\n # world = max(self.worlds, key=lambda w: w.nn.fitness)\r\n # draw the world\r\n world = self.worlds[0]\r\n world.renderer.render(screen)\r\n\r\n networkSurface = pygame.Surface((750, 180)).convert_alpha()\r\n networkSurface.fill((0, 0, 0, 0))\r\n # draw the minimap and network\r\n networkrenderer.render_network(networkSurface, world.nn, world.minimapValues)\r\n screen.blit(networkSurface, (10, 60))", "def get_neighbours(self):\n return self.neighbours", "def draw_node(j, x, y):\n return \"\\\\node(T\" + str(j) + \") at (\" + str(x) + \",\" + str(y) + \") {\" + str(j) + \"};\\n\"", "def neighbours(self):\n return [x.node for x in self.edges]", "def neighbor(self,node):\n return self.__graph[node]", "def extractNeighbourhood(img,x,y):\n nhood = np.zeros((x.shape[0],9), dtype = bool);\n \n # calculate indices (if many voxels this is only 9 loops!)\n for xx in range(3):\n for yy in range(3):\n #w = _xyz_to_neighbourhood[xx,yy,zz];\n w = 3 * xx + yy;\n idx = x+xx-1; idy = y+yy-1;\n 
nhood[:,w]=img[idx, idy];\n \n nhood.shape = (nhood.shape[0], 3, 3);\n nhood[:, 1, 1] = 0;\n return nhood;", "def showNeighbours(logger, version=None):\n\n if version:\n if version == 4:\n return Command.executeIp(logger, IpConstant.IPV4, IpOption.NEIGHBOUR, IpAction.SHOW) \n elif version == 6:\n return Command.executeIp(logger, IpConstant.IPV6, IpOption.NEIGHBOUR, IpAction.SHOW) \n\n rc = Command.executeIp(logger, IpOption.NEIGHBOUR, IpAction.SHOW) \n return rc", "def show_holes_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n radius=9\n out_image = img.copy()\n out_image = cv2.cvtColor(out_image, cv2.COLOR_GRAY2RGB)\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n y_center = (dy.start + dy.stop - 1)/2 \n center=(x_center,y_center)\n cv2.circle(out_image, center, radius,(111,17,108),thickness=2)\n\n plt.figure()\n plt.imshow(out_image)\n plt.autoscale(False)\n return out_image", "def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")", "def neighbourhood(index, image_width, image_height):\n\tneighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))\n\tif len(index.shape) == 2:\n\t\tneighbourhoods = neighbourhoods[:, np.newaxis, :]\n\n\tneighbours_and_itself = index + neighbourhoods\n\tkeep = np.ones(9, dtype=bool)\n\tkeep[4] = False # drop the point itself, but keep the neighbours.\n\tneighbours = neighbours_and_itself[keep]\n\tif len(index.shape) == 2:\n\t\tneighbours = np.stack(neighbours, axis=1)\n\t\n\tmask = np.ones_like(neighbours, dtype=bool)\n\t# remove all neighbours that have either a negative value in them\n\tnegative = np.where(neighbours < 0)\n\tmask[negative] = False\n\t# or a value equal to image_height in x\n\tgreater_than_image_height = np.where(neighbours[..., 0] >= image_height)\n\tmask[greater_than_image_height] = False\n\t# or image_width in z\n\tgreater_than_image_width = np.where(neighbours[..., 1] >= image_height)\n\tmask[greater_than_image_width] = False\n\t# or that correspond to an index in 'index'\n\ttiled = np.expand_dims(index, 1)\n\ttiled = np.tile(tiled, (1, neighbours.shape[1], 1))\n\tequal_to_index = np.equal(neighbours, tiled)\n\tequal_to_index = np.all(equal_to_index, axis=-1)\n\tmask[equal_to_index] = False\n\t\n\tmask = np.all(mask, axis=-1)\n\n\t# print(mask)\n\t# for i, (m, n) in enumerate(zip(mask, neighbours)):\n\t# \tif len(index.shape) == 2:\n\t# \t\tfor keep, (i, j) in zip(m, n):\n\t# \t\t\tprint(\"point\", i, j, \"is good:\", keep)\n\t# \telse:\n\t# \t\tkeep = m\n\t# \t\ti, j = n\n\t# \t\tprint(\"point\", i, j, \"is good:\", keep)\n\t\t\n\tneighbours = neighbours[mask]\n\t# get rid of duplicates:\n\tneighbours = np.unique(neighbours, axis=0)\n\treturn neighbours\n\t# # print(image[row, col])\n\t# min_x = max(i-1, 0)\n\t# max_x = min(i+1, image_w-1)\n\t# min_y = max(j-1, 0)\n\t# max_y = min(j+1, image_h-1)\n\t# indices = set(\n\t# \t(x, y)\n\t# \tfor x in range(min_x, max_x + 1)\n\t# \tfor y in range(min_y, max_y + 1)\n\t# )\n\t# print(indices)\n\t# indices.discard((i, j))\n\t# return 
indices\n\t# # return np.array(indices)", "def neighborhood(G,n,o):\n base = G[n]\n neighbors = {}\n neighbors[n] = 0\n newNodes = set(neighbors.keys())\n for i in range(1,o+1):\n #for node in neighbors.keys():\n nodes = newNodes.copy()\n newNodes = set()\n for node in nodes:\n branch = G[node]\n for node in branch:\n if node not in neighbors:\n newNodes.add(node)\n neighbors[node]=i\n return neighbors", "def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image", "def cxneighborhood(self, pkg_name, plot=False, **kwargs):\n if pkg_name not in self:\n find = self.cxfind(pkg_name)\n if len(find) == 1:\n print(\"There is no package name %s.\\n\"\n \"I will use %s instead.\" %\n (pkg_name, find[0]))\n pkg_name = find[0]\n else:\n print(\"There is no package name %s,\\n\"\n \"but the package names below include it.\\n \" % pkg_name,\n \"\\n \".join(find))\n return []\n subgraph = networkx.ego_graph(self, pkg_name, undirected=1)\n\n if plot:\n node_color = kwargs.get(\"node_color\", \"skyblue\")\n font_color = kwargs.get(\"font_color\", \"red\")\n prog = kwargs.get(\"prog\", \"dot\")\n networkx.draw_graphviz(\n subgraph,\n prog=prog,\n root=pkg_name,\n font_color=font_color,\n node_color=node_color)\n\n return subgraph", "def display_npcs(self):\r\n if self.location == world.LocationType.INDOORS:\r\n print(\"In the room before you, you see:\")\r\n\r\n for i in range(len(self.npc_list)):\r\n print(\"A \" + self.npc_list[i].name + \" (Distance: \" + str(self.npc_distances[i]) + \"ft.)\")", "def showGraph(G, mate, label=\"\"):\r\n \r\n # Set the positions for all nodes and the figure size\r\n plt.close('all')\r\n plt.figure( figsize=(10, 10) )\r\n pos = nx.graphviz_layout(G, prog='sfdp', args='')\r\n \r\n # Draw the graph with node labels and a title\r\n plt.title(label)\r\n nx.draw(G, pos, node_size=400, with_labels=True)\r\n \r\n # Draw the matched edges\r\n nx.draw_networkx_edges(G, pos, edgelist=mate.items(),\r\n width=5, alpha=0.4, edge_color='b')\r\n \r\n plt.axis('off')\r\n plt.show()", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def plot_potential(self):\n imshow(self.U, extent=(self.x[0], self.x[-1], self.y[0], self.y[-1]), aspect='auto', interpolation='None')\n xlabel('x')\n ylabel('y')", "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def show(self) -> None:\n cv.imshow(str(self.__class__), self.output_image)", "def node_graph(imageSkeleton, imageGaussian):\n ones = np.ones((3, 3))\n imageFiltered = sp.ndimage.generic_filter(imageSkeleton, node_find, footprint=ones, mode='constant', cval=0)\n imageNodeCondense = node_condense(imageFiltered, imageGaussian, ones)\n imageLabeledNodes = skimage.segmentation.relabel_sequential(imageNodeCondense)[0]\n imageLabeledSkeleton, labels = sp.ndimage.label(imageSkeleton, structure=ones)\n for label in range(1, labels + 1):\n detectedNodes = np.max((imageLabeledSkeleton == label) * (imageLabeledNodes > 0))\n if (detectedNodes == 0):\n imageSkeleton[imageLabeledSkeleton == label] = 0\n imageAnnotated = 1 * ((imageSkeleton + imageLabeledNodes) > 0) + imageLabeledNodes\n return(imageAnnotated)", "def print(self):\n # it would be nice just to add one point instead of printing all again from 
scratch\n stones_player_0 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == -1]\n stones_player_1 = [(i, j) for i in range(self.size) for j in range(self.size) if self.board[i, j] == 1]\n plt.plot([0, self.size-1, 0, self.size-1], [0, 0, self.size-1, self.size-1], marker='x', ls='')\n plt.plot(*zip(*stones_player_0), marker='o', color='r', ls='')\n plt.plot(*zip(*stones_player_1), marker='o', color='b', ls='')\n\n plt.draw()\n plt.show(block=False)", "def show(self, name='Detections'):\n cv2.imshow(name, self.get_image())\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def show(image,label,pred):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n s=\"True Label : \"+str(label)+\" Predicted label : \"+str(pred)\n pyplot.xlabel(s,fontname=\"Arial\", fontsize=20 )\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show_image(self):\n cv2.imshow(self.config.DISPLAY_NAME, self.image)", "def show(image, label, weights, prediction, ax):\n global img_objects\n if len(img_objects)==0:\n for i in range(10):\n _img = ax[0, i].imshow(weights[i].reshape(28,28), cmap='gray')\n img_objects.append(_img)\n _img = ax[1, 5].imshow(image.reshape(28,28), cmap='gray')\n img_objects.append(_img)\n else:\n for i in range(10):\n img_objects[i].set_data(weights[i].reshape(28,28))\n img_objects[i].set_clim(vmin=0, vmax=np.max(weights[i]))\n img_objects[10].set_data(image.reshape(28,28))\n ax[0,5].set_title('truth: %d, predict: %d'%(np.argmax(label), prediction))", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def neighbors_graph(aligner):\n # FIXME: This should properly test for overlap, possibly via\n # intersection of bounding rectangles.\n if not hasattr(aligner, '_neighbors_graph'):\n pdist = scipy.spatial.distance.pdist(aligner.metadata.positions,\n metric='cityblock')\n sp = scipy.spatial.distance.squareform(pdist)\n max_distance = aligner.metadata.size.max() + 1\n edges = zip(*np.nonzero((sp > 0) & (sp < max_distance)))\n graph = nx.from_edgelist(edges)\n graph.add_nodes_from(range(aligner.metadata.num_images))\n aligner._neighbors_graph = graph\n return aligner._neighbors_graph", "def show(self, screen):\n x_display = self.xy_position[0] * constants.CELL_SIZE\n y_display = self.xy_position[1] * constants.CELL_SIZE\n screen.blit(self.image, (x_display, y_display))", "def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()", "def show_digit( Pixels ):\r\n print(Pixels.shape)\r\n Patch = Pixels.reshape((8,8))\r\n plt.figure(1, figsize=(4,4))\r\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # cm.gray_r # cm.hot\r\n plt.show()", "def display(self):\n display(self.image)", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(20, 20))\n \n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.savefig(str(image_id)+'_seg.jpg',bbox_inches='tight')\n plt.close()", "def branches(image):\n return _neighbors_conv(image) > 2", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def show_image(self, mask=np.ones((32, 32), dtype=bool)):\n image = np.copy(self.__image)\n image[~mask] = 
0\n plt.imshow(image, aspect=\"auto\")\n plt.show()", "def drawCell(self,land,uland,vland,marked):\n from math import sqrt, pow\n #--Tranlate grid point (u,v) to pixel point\n if not self.changed: self.edit()\n #--u/v max/min are grid range of visible map. \n #--wcell is bit width of cell. 512 is bit width of visible map.\n (umin,umax,vmin,vmax,wcell,wmap) = (-28,27,-27,28,9,512)\n if not ((umin <= uland <= umax) and (vmin <= vland <= vmax)):\n return\n #--x0,y0 is bitmap coordinates of top left of cell in visible map.\n (x0,y0) = (4 + wcell*(uland-umin), 4 + wcell*(vmax-vland))\n #--Default to deep\n mapc = [Fmap.DEEP]*(9*9)\n heights = land and land.getHeights()\n if heights:\n #--Land heights are in 65*65 array, starting from bottom left. \n #--Coordinate conversion. Subtract one extra from height array because it's edge to edge.\n converter = [(65-2)*px/(wcell-1) for px in range(wcell)]\n for yc in range(wcell):\n ycoff = wcell*yc\n yhoff = (65-1-converter[yc])*65\n for xc in range(wcell):\n height = heights[converter[xc]+yhoff]\n if height >= 0: #--Land\n (r0,g0,b0,r1,g1,b1,scale) = (66,48,33,32,23,16,sqrt(height/3000.0))\n scale = int(scale*10)/10.0 #--Make boundaries sharper.\n r = chr(max(0,int(r0 - r1*scale)) & ~1)\n else: #--Sea\n #--Scale color from shallow to deep color.\n (r0,g0,b0,r1,g1,b1,scale) = (37,55,50,12,19,17,-height/2048.0)\n r = chr(max(0,int(r0 - r1*scale)) | 1)\n g = chr(max(0,int(g0 - g1*scale)))\n b = chr(max(0,int(b0 - b1*scale)))\n mapc[xc+ycoff] = r+g+b\n #--Draw it\n mapd = self.mapd\n for yc in range(wcell):\n ycoff = wcell*yc\n ymoff = wmap*(y0+yc)\n for xc in range(wcell):\n cOld = mapd[x0+xc+ymoff]\n cNew = mapc[xc+ycoff]\n rOld = ord(cOld[0])\n #--New or old is sea.\n if (ord(cNew[0]) & 1) or ((rOld & 1) and\n (-2 < (1.467742*rOld - ord(cOld[1])) < 2) and\n (-2 < (1.338710*rOld - ord(cOld[2])) < 2)):\n mapd[x0+xc+ymoff] = cNew\n if marked:\n self.drawBorder(Fmap.MARKED,x0+2,y0+2,x0+7,y0+7,1)\n pass", "def make_graph(imageAnnotated, imageGaussian):\n nodeNumber = imageAnnotated.max() - 1\n distanceDiagonalPixels, distanceDiagonalPixelsCubic = np.sqrt(2.0), np.sqrt(3.0)\n distanceMatrix = np.array([[distanceDiagonalPixelsCubic, distanceDiagonalPixels, distanceDiagonalPixelsCubic], [distanceDiagonalPixels, 1, distanceDiagonalPixels],\n [distanceDiagonalPixelsCubic, distanceDiagonalPixels, distanceDiagonalPixelsCubic]])\n nodePositions = np.transpose(np.where(imageAnnotated > 1))[:, ::-1]\n imagePropagatedNodes = imageAnnotated.copy()\n imageFilamentLength = 1.0 * (imageAnnotated.copy() > 0)\n imageFilamentIntensity = 1.0 * (imageAnnotated.copy() > 0)\n dimensionY, dimensionX = imageAnnotated.shape\n filament = (imagePropagatedNodes == 1).sum()\n while (filament > 0):\n nodePixel = np.transpose(np.where(imagePropagatedNodes > 1))\n for posY, posX in nodePixel:\n xMin, xMax, yMin, yMax = bounds(posX - 1, 0, dimensionX), bounds(posX + 2, 0, dimensionX), bounds(posY - 1, 0, dimensionY), bounds(posY + 2, 0, dimensionY)\n nodeNeighborhood = imagePropagatedNodes[yMin:yMax, xMin:xMax]\n nodeFilamentLength = imageFilamentLength[yMin:yMax, xMin:xMax]\n nodeFilamentIntensity = imageFilamentIntensity[yMin:yMax, xMin:xMax]\n imagePropagatedNodes[yMin:yMax, xMin:xMax] = np.where(nodeNeighborhood == 1, imagePropagatedNodes[posY, posX], nodeNeighborhood)\n imageFilamentLength[yMin:yMax, xMin:xMax] = np.where(nodeFilamentLength == 1, distanceMatrix[0:yMax - yMin, 0:xMax - xMin] + imageFilamentLength[posY, posX], nodeFilamentLength)\n imageFilamentIntensity[yMin:yMax, 
xMin:xMax] = np.where(nodeFilamentIntensity == 1, imageGaussian[posY, posX] + imageFilamentIntensity[posY, posX], nodeFilamentIntensity)\n filament = (imagePropagatedNodes == 1).sum()\n graph = nx.empty_graph(nodeNumber, nx.MultiGraph())\n filamentY, filamentX = np.where(imagePropagatedNodes > 1)\n for posY, posX in zip(filamentY, filamentX):\n nodeIndex = imagePropagatedNodes[posY, posX]\n xMin, xMax, yMin, yMax = bounds(posX - 1, 0, dimensionX), bounds(posX + 2, 0, dimensionX), bounds(posY - 1, 0, dimensionY), bounds(posY + 2, 0, dimensionY)\n filamentNeighborhood = imagePropagatedNodes[yMin:yMax, xMin:xMax].flatten()\n filamentLength = imageFilamentLength[yMin:yMax, xMin:xMax].flatten()\n filamentIntensity = imageFilamentIntensity[yMin:yMax, xMin:xMax].flatten()\n for index, pixel in enumerate(filamentNeighborhood):\n if (pixel != nodeIndex and pixel > 1):\n node1, node2 = np.sort([nodeIndex - 2, pixel - 2])\n nodeDistance = sp.linalg.norm(nodePositions[node1] - nodePositions[node2])\n filamentLengthSum = imageFilamentLength[posY, posX] + filamentLength[index]\n filamentIntensitySum = imageFilamentIntensity[posY, posX] + filamentIntensity[index]\n minimumEdgeWeight = max(1e-9, filamentIntensitySum)\n edgeCapacity = 1.0 * minimumEdgeWeight / filamentLengthSum\n edgeLength = 1.0 * filamentLengthSum / minimumEdgeWeight\n edgeConnectivity = 0\n edgeJump = 0\n graph.add_edge(node1, node2, edist=nodeDistance, fdist=filamentLengthSum, weight=minimumEdgeWeight, capa=edgeCapacity, lgth=edgeLength, conn=edgeConnectivity, jump=edgeJump)\n return(graph, nodePositions)", "def display_images(digits_im):\n i = 0\n\n for img in digits_im:\n if i < N_NEIGHBOURS:\n # Visualize your data\n im_max = np.max(img)\n img = PIXELS * (np.abs(im_max - img) / im_max)\n res = cv2.resize(img, (DIM, DIM), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite('digit ' + str(i) + '.png', res)\n i += 1\n else:\n break", "def showAssembled(self):\n im = np.zeros(self.puzzleImage.shape);\n r,c,d = self.puzzleImage.shape;\n r = r/len(self.puzzlePieces); # assume square matrix\n c = c/len(self.puzzlePieces);\n \n for i in range (len(self.puzzlePieces)):\n for j in range (len(self.puzzlePieces)):\n im[i*r:(i+1)*r, j*c:(j+1)*c] = self.puzzlePieces[i,j];\n \n plt.imshow(im);\n plt.show();", "def draw_tree(self):\n nx.draw(self.diffusion_tree, with_labels=True)", "def bprint(self):\n\t\tpcolor = [\n\t\t\t(0, 0, 255, 255),\n\t\t\t(255, 0, 0, 255),\n\t\t\t(0, 255, 0, 255),\n\t\t\t(255, 255, 0, 255),\n\t\t\t(0, 255, 255, 255),\n\t\t\t(255, 140, 0, 255),\n\t\t\t(140, 0, 255, 255),\n\t\t\t(255, 0, 255, 255)\n\t\t]\n\t\timg = Image.open(bundled_data_path(self.cog) / 'img.png')\n\t\td = ImageDraw.Draw(img)\n\t\t#OWNEDBY\n\t\tfor t in range(40):\n\t\t\tif self.ownedby[t] > -1:\n\t\t\t\tif 0 < t < 10:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-39,702,(650-(t*50))-10,735],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-37,702,(650-(t*50))-12,733],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t\t\telif 10 < t < 20:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[16,(650-((t-10)*50))-39,50,(650-((t-10)*50))-10],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[18,(650-((t-10)*50))-37,50,(650-((t-10)*50))-12],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t\t\telif 20 < t < 
30:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+11,16,(100+((t-20)*50))+41,50],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+13,18,(100+((t-20)*50))+39,50],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t\t\telif 30 < t < 40:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[702,(100+((t-30)*50))+11,736,(100+((t-30)*50))+41],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[702,(100+((t-30)*50))+13,734,(100+((t-30)*50))+39],\n\t\t\t\t\t\tfill=pcolor[self.ownedby[t]]\n\t\t\t\t\t)\n\t\t#TILE\n\t\t#Because the player int used to be 1 indexed, the players would be in the wrong\n\t\t#position without 1 indexing and subtracting 1 from t when calling self.tile[t]\n\t\t#and pcolor[t]. I could fix this by changing the hard coded values, but this is\n\t\t#easier in the short term.\n\t\tfor t in range(1, self.num + 1):\n\t\t\tif not self.isalive[t-1]:\n\t\t\t\tcontinue\n\t\t\tif self.tile[t-1] == 0:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[(12*(t-1))+604,636,(12*(t-1))+614,646], fill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[(12*(t-1))+605,637,(12*(t-1))+613,645], fill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 0 < self.tile[t-1] < 10:\n\t\t\t\tif t < 5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-47)+(12*(t-1)),636,((650-(self.tile[t-1]*50))-37)+(12*(t-1)),646],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-46)+(12*(t-1)),637,((650-(self.tile[t-1]*50))-38)+(12*(t-1)),645],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-47)+(12*(t-5)),648,((650-(self.tile[t-1]*50))-37)+(12*(t-5)),658],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((650-(self.tile[t-1]*50))-46)+(12*(t-5)),649,((650-(self.tile[t-1]*50))-38)+(12*(t-5)),657],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\telif self.tile[t-1] == 10:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[106,(12*(t-1))+604,116,(12*(t-1))+614],\n\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[107,(12*(t-1))+605,115,(12*(t-1))+613],\n\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 10 < self.tile[t-1] < 20:\n\t\t\t\tif t < 5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[106,((650-((self.tile[t-1]-10)*50))-47)+(12*(t-1)),116,((650-((self.tile[t-1]-10)*50))-37)+(12*(t-1))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[107,((650-((self.tile[t-1]-10)*50))-46)+(12*(t-1)),115,((650-((self.tile[t-1]-10)*50))-38)+(12*(t-1))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[94,((650-((self.tile[t-1]-10)*50))-47)+(12*(t-5)),104,((650-((self.tile[t-1]-10)*50))-37)+(12*(t-5))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[95,((650-((self.tile[t-1]-10)*50))-46)+(12*(t-5)),103,((650-((self.tile[t-1]-10)*50))-38)+(12*(t-5))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\telif self.tile[t-1] == 20:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[138-(12*(t-1)),106,148-(12*(t-1)),116],\n\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[139-(12*(t-1)),107,147-(12*(t-1)),115],\n\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 20 < self.tile[t-1] < 30:\n\t\t\t\tif t < 
5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+39)-(12*(t-1)),106,((100+((self.tile[t-1]-20)*50))+49)-(12*(t-1)),116],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+40)-(12*(t-1)),107,((100+((self.tile[t-1]-20)*50))+48)-(12*(t-1)),115],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+39)-(12*(t-5)),94,((100+((self.tile[t-1]-20)*50))+49)-(12*(t-5)),104],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[((100+((self.tile[t-1]-20)*50))+40)-(12*(t-5)),95,((100+((self.tile[t-1]-20)*50))+48)-(12*(t-5)),103],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\telif self.tile[t-1] == 30:\n\t\t\t\td.rectangle(\n\t\t\t\t\t[636,138-(12*(t-1)),646,148-(12*(t-1))],\n\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t)\n\t\t\t\td.rectangle(\n\t\t\t\t\t[637,139-(12*(t-1)),645,147-(12*(t-1))],\n\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t)\n\t\t\telif 30 < self.tile[t-1] < 40:\n\t\t\t\tif t < 5:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[636,((100+((self.tile[t-1]-30)*50))+39)-(12*(t-1)),646,((100+((self.tile[t-1]-30)*50))+49)-(12*(t-1))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[637,((100+((self.tile[t-1]-30)*50))+40)-(12*(t-1)),645,((100+((self.tile[t-1]-30)*50))+48)-(12*(t-1))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[648,((100+((self.tile[t-1]-30)*50))+39)-(12*(t-5)),658,((100+((self.tile[t-1]-30)*50))+49)-(12*(t-5))],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[649,((100+((self.tile[t-1]-30)*50))+40)-(12*(t-5)),657,((100+((self.tile[t-1]-30)*50))+48)-(12*(t-5))],\n\t\t\t\t\t\tfill=pcolor[t-1]\n\t\t\t\t\t)\n\t\t#NUMHOUSE\n\t\tfor t in range(40):\n\t\t\tif self.numhouse[t] == 5:\n\t\t\t\tif 0 < t < 10:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-33,606,(650-(t*50))-15,614],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(650-(t*50))-32,607,(650-(t*50))-16,613],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\t\telif 10 < t < 20:\t\t\t\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[138,(650-((t-10)*50))-33,146,(650-((t-10)*50))-17],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[139,(650-((t-10)*50))-32,145,(650-((t-10)*50))-18],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\t\telif 20 < t < 30:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+17,138,(100+((t-20)*50))+35,146],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[(100+((t-20)*50))+18,139,(100+((t-20)*50))+34,145],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\t\telif 30 < t < 40:\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[606,(100+((t-30)*50))+17,614,(100+((t-30)*50))+35],\n\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t)\n\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t[607,(100+((t-30)*50))+18,613,(100+((t-30)*50))+34],\n\t\t\t\t\t\tfill=(255,0,0,255)\n\t\t\t\t\t)\n\t\t\telif self.numhouse[t] > 0:\n\t\t\t\tfor tt in range(self.numhouse[t]):\n\t\t\t\t\tif 0 < t < 10:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((650-(t*50))-47)+(tt*12),606,((650-(t*50))-37)+(tt*12),614],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((650-(t*50))-46)+(tt*12),607,((650-(t*50))-38)+(tt*12),613],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\telif 10 < t < 
20:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[138,((650-((t-10)*50))-47)+(tt*12),146,((650-((t-10)*50))-37)+(tt*12)],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[139,((650-((t-10)*50))-46)+(tt*12),145,((650-((t-10)*50))-38)+(tt*12)],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\telif 20 < t < 30:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((100+((t-20)*50))+39)-(tt*12),138,((100+((t-20)*50))+49)-(tt*12),146],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[((100+((t-20)*50))+40)-(tt*12),139,((100+((t-20)*50))+48)-(tt*12),145],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\telif 30 < t < 40:\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[606,((100+((t-30)*50))+39)-(tt*12),614,((100+((t-30)*50))+49)-(tt*12)],\n\t\t\t\t\t\t\tfill=(0,0,0,255)\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.rectangle(\n\t\t\t\t\t\t\t[607,((100+((t-30)*50))+40)-(tt*12),613,((100+((t-30)*50))+48)-(tt*12)],\n\t\t\t\t\t\t\tfill=(0,255,0,255)\n\t\t\t\t\t\t)\n\t\t#END\n\t\ttemp = BytesIO()\n\t\ttemp.name = 'board.png'\n\t\timg.save(temp)\n\t\ttemp.seek(0)\n\t\treturn temp", "def show_digit( Pixels ):\n from matplotlib import pyplot as plt\n print(Pixels.shape)\n Patch = Pixels.reshape((8,8))\n plt.figure(1, figsize=(4,4))\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # plt.cm.gray_r # plt.cm.hot\n plt.show()", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def show_binary(img_bin, des_img_pos, new_img_pos, img_thres):\n img_bgr = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB)\n #draw tolerance lines\n #left/right vertical lines\n xl = des_img_pos[0] - img_thres\n xr = des_img_pos[0] + img_thres\n y1 = 0\n y2 = img_shape[1]\n cv2.line(img_bgr,(xl,y1),(xl,y2),(0,255,0),1)\n cv2.line(img_bgr,(xr,y1),(xr,y2),(0,255,0),1)\n #top/bottom horizontal lines\n yt = des_img_pos[1] - img_thres\n yb = des_img_pos[1] + img_thres\n x1 = 0\n x2 = img_shape[0]\n cv2.line(img_bgr,(x1,yt),(x2,yt),(0,255,0),1)\n cv2.line(img_bgr,(x1,yb),(x2,yb),(0,255,0),1)\n #draw circle at detected object\n cv2.circle(img_bgr,tuple(new_img_pos),6,(255,0,0),2)\n #show image\n cv2.imshow(window_name, img_bgr)\n cv2.waitKey(1000) & 0xFF", "def draw(self, state):\n if state is None:\n state = self.model.current_state\n for row in range(len(self.model.maze.walls)):\n self.__draw_row_division()\n print(\" {0:2d} \".format(row), end='') # Imprime número da linha\n\n for col in range(len(self.model.maze.walls[0])):\n if self.model.maze.walls[row][col] == 1:\n print(\"|XXX\", end='') # Desenha parede\n elif self.model.goal_state.get_element(Coordinate(row, col)):\n if state.player.row == row and state.player.col == col:\n print(\"|G-P\", end='') # Desenha objetivo e jogador.\n elif state.get_element(Coordinate(row, col)):\n print(\"|G-B\", end='') # Desenha objetivo e caixa.\n else:\n print(\"| G\", end='') # Desenha objetivo\n elif state.player.row == row and state.player.col == col:\n print(\"| P\", end='') # Desenha jogador\n elif state.get_element(Coordinate(row, col)):\n print(\"| B\", end='') # Desenha caixa.\n else:\n print(\"| \", end='') # Desenha vazio\n print(\"|\")\n if row == (len(self.model.maze.walls) - 1):\n self.__draw_row_division()", "def show(type,img):\n # print(img)\n cv2.imshow(type, img)\n cv2.waitKey()", "def make_neighbor_list(self):\n nodeinfo = bytearray()\n\n # the node itself\n 
for item in self.neighbors.my_info.get_nodeinfo():\n nodeinfo.extend(item)\n count = 1\n\n # neighboring node\n for nd in self.neighbors.nodeinfo_list.keys():\n if self.neighbors.nodeinfo_list[nd].is_alive:\n count += 1\n for item in self.neighbors.nodeinfo_list[nd].get_nodeinfo():\n nodeinfo.extend(item)\n\n nodes = bytearray(count.to_bytes(4, 'big'))\n nodes.extend(nodeinfo)\n return bytes(nodes)", "def show_mask(image, mask): \n plt.subplot(1,2,1)\n plt.title('image')\n plt.imshow(image)\n plt.subplot(1,2,2)\n plt.title('mask')\n plt.imshow(mask)\n plt.show()", "def view(self):\n\t\tfigure_out = self.figure.copy()\n\t\timage_pairs = np.unique(self.local_matches[\"image_pairs\"][0])\n\t\tfor i in image_pairs:\n\t\t\t# draw bounding box\n\t\t\ti_loc = self.local_database[\"image_locs\"][np.where(self.local_database[\"image_idx\"] == i)[0][0]]\n\t\t\tcv2.rectangle(figure_out, (int(i_loc[0]), int(i_loc[1])), (int(i_loc[0]+i_loc[2]), int(i_loc[1]+i_loc[3])),\n\t\t\t\t\t\t color = (255,0,0), thickness=5)\n\t\t\t# label matches text\n\t\t\tcv2.putText(figure_out, str(i), (int(i_loc[0]-50), int(i_loc[1] + 50)), cv2.FONT_HERSHEY_SIMPLEX, 2,\n\t\t\t\t\t color=(255,0,0), thickness=7)\n\t\tself.save_figure(figure_out)", "def getNeighboursInfo(self,pubkey=None):\n return list(map(self.GetNodeInfo, self.getNeighboursPubkeys(pubkey)))" ]
[ "0.7047486", "0.6181761", "0.6136457", "0.61287487", "0.59386134", "0.59066224", "0.5844902", "0.5810311", "0.5801892", "0.57685393", "0.5745063", "0.5735822", "0.5733103", "0.57241136", "0.57115674", "0.56976724", "0.56679213", "0.56657785", "0.5641514", "0.56204975", "0.55908644", "0.5574266", "0.55739707", "0.5572492", "0.55564094", "0.55529624", "0.55504614", "0.55486983", "0.55485576", "0.5541845", "0.55269814", "0.5508617", "0.5501754", "0.5489919", "0.54807556", "0.54793555", "0.54751813", "0.5471542", "0.5465266", "0.54627717", "0.5456206", "0.54527915", "0.545248", "0.5448747", "0.54482967", "0.5441692", "0.5441692", "0.5441692", "0.54326427", "0.5421133", "0.5417497", "0.54159516", "0.541517", "0.5408789", "0.5405921", "0.5405552", "0.54001814", "0.53970814", "0.53937066", "0.5393144", "0.5391314", "0.5390024", "0.538283", "0.53822297", "0.5374772", "0.5373997", "0.5368876", "0.5362184", "0.5360859", "0.53579426", "0.5353143", "0.53491807", "0.53458846", "0.53454614", "0.5336579", "0.5336322", "0.5328074", "0.53264767", "0.5325823", "0.53247607", "0.5324748", "0.5321731", "0.5319175", "0.5312728", "0.5304066", "0.5301355", "0.5298168", "0.5297403", "0.5295537", "0.52950734", "0.52950203", "0.52939963", "0.5293239", "0.529233", "0.5291724", "0.5283361", "0.52825755", "0.5280233", "0.52800375", "0.5279167" ]
0.73047006
0
Subpaths is a list of paths on tail nodes. Return a new path generated by concatenating this edge. This is used in k-best paths generation.
def make_path(self, subpaths):
    assert len(self.tail) == len(subpaths), '%s' % self
    path = Path(self, subpaths)
    weight = self.hg.one
    for p in subpaths:
        if p is not None:
            weight = self.hg.prod(weight, p.weight)
    weight = self.hg.prod(weight, self.hg.w(self))
    path.weight = weight
    return path
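The make_path snippet above combines one chosen subpath per tail node with the edge's own weight under a semiring product, which is the core step of k-best derivation extraction from a hypergraph. Below is a minimal, self-contained sketch of that pattern; the Edge, Path, and Hypergraph stand-ins (and their one/prod/w members) are assumptions made for illustration, not the actual classes the snippet belongs to.

# Minimal stand-ins for the hypergraph pieces make_path relies on.
# These names and signatures are assumptions for illustration only.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Edge:
    label: str
    tail: List[str]                    # names of the tail nodes this edge consumes
    weight: float = 1.0


@dataclass
class Path:
    edge: Edge                         # edge applied at the root of this (sub)derivation
    subpaths: List[Optional["Path"]]   # one chosen subpath per tail node
    weight: float = 1.0


class Hypergraph:
    one = 1.0                          # multiplicative identity of the (prob, *) semiring

    @staticmethod
    def prod(a: float, b: float) -> float:
        return a * b                   # semiring product; plain multiplication here

    @staticmethod
    def w(edge: Edge) -> float:
        return edge.weight             # weight assigned to the edge itself


def make_path(hg: Hypergraph, edge: Edge, subpaths: List[Optional[Path]]) -> Path:
    # Same accumulation as the snippet above: product of the subpath weights, then the edge weight.
    assert len(edge.tail) == len(subpaths)
    weight = hg.one
    for p in subpaths:
        if p is not None:
            weight = hg.prod(weight, p.weight)
    weight = hg.prod(weight, hg.w(edge))
    path = Path(edge, subpaths)
    path.weight = weight
    return path


if __name__ == "__main__":
    hg = Hypergraph()
    leaf_a = make_path(hg, Edge("a", tail=[], weight=0.5), [])
    leaf_b = make_path(hg, Edge("b", tail=[], weight=0.4), [])
    root = make_path(hg, Edge("root", tail=["A", "B"], weight=0.9), [leaf_a, leaf_b])
    print(root.weight)                 # 0.5 * 0.4 * 0.9 ≈ 0.18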
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decompose_paths_rec(node_inner, path):\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def toSubpathPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def get_subgraph_from_paths(self, paths):\n nodes, edges = graph_elements_from_paths(paths)\n subgraph = self.graph.subgraph(nodes).edge_subgraph(edges)\n return subgraph", "def subpath(self):\n return self._subpath()", "def sub_path(self) -> str:\n return self._sub_path", "def extract_path(self):\n if self.extracted_path is not None:\n return self.extracted_path\n current = self\n path = []\n while current:\n path.append([current.end, current.path_cost])\n current = current.parent\n return list(reversed(path))", "def acyclic_sub_path(tree, path):\n for u, v in pairwise(reversed(path)):\n if v in tree.nodes and u not in tree.nodes:\n return path[path.index(v):]", "def find_all_subpaths(all_paths):\r\n # Calculate length of the maximum path\r\n max_length = max(len(s) for s in all_paths)\r\n\r\n subpaths = set()\r\n for path in all_paths:\r\n for k in range(0, max_length + 1):\r\n for ii in range(0, len(path) - k + 1):\r\n subpaths.add(tuple(path[ii:ii + k]))\r\n subpaths = filter(None, subpaths)\r\n return list(subpaths)", "def get_paths_of_length_k(subpaths, k):\r\n subpaths_of_length_k = [i for i in subpaths if len(\r\n i) == k] # all k-length subpaths\r\n subpaths = [i for i in subpaths if len(i) != k] # remove k-length subpaths\r\n return subpaths_of_length_k, subpaths", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "def extend_path(self, end, path_cost):\n return self.combine(Action(end, path_cost))", "def sub_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sub_path\")", "def appendPath(paths: List[unicode]) -> unicode:\n ...", "def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def OptimalSubsequenceWarpingPath( self ):\n subseqCandidates = []\n subseqCosts = []\n\n lastRow = list(self.D[-1])\n bStar = lastRow.index( min(lastRow) )\n while lastRow[bStar] < self.maxPathLen or len(subseqCosts) == 0:\n # find aStar with minimum distance for subsequences ending at bStar\n P, cost = self.OptimalWarpingPath( bStar )\n subseqCandidates.append( P )\n subseqCosts.append( cost )\n lastRow[bStar] = float(\"inf\")\n bStar = lastRow.index( min(lastRow) ) \n minCost = min(subseqCosts)\n return subseqCandidates[ subseqCosts.index( minCost ) ], minCost", "def 
get_path_ends(self):\n\n end1, end2 = self.get_end_vertices()\n\n return Path(end1), Path(end2)", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def save_subpath(self, index, result_path='', subPath=''):\n pass", "def build_path(\n G: nx.Graph,\n node: int,\n endpoints: List[int],\n path: List[int]) -> List[int]:\n\n # For each successor in the passed-in node\n for successor in G.successors(node):\n if successor not in path:\n # If successor is already in path, ignore it, otherwise add to path\n path.append(successor)\n\n if successor not in endpoints:\n # If successor not endpoint, recursively call\n # build_path until endpoint found\n path = build_path(G, successor, endpoints, path)\n\n else:\n # If successor is endpoint, path is completed, so return\n return path\n\n if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):\n # If end of the path is not actually an endpoint and the path's\n # first node is a successor of the path's final node, then this is\n # actually a self loop, so add path's first node to end of path to\n # close it\n path.append(path[0])\n\n return path", "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is 
{1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def cross(subpaths, j, k):\r\n for q in range(j, k):\r\n subpaths[q].direct_close()\r\n subpaths[q].reverse()\r\n subpaths[j:k] = subpaths[j:k][::-1]", "def edge_subgraph(self, edges, relabel_nodes=False, output_device=None):\n raise NotImplementedError(\"edge_subgraph is not implemented yet\")", "def interjoint_paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n subpaths = self._single_tree_interjoint_paths(\n tree, return_indices=return_indices\n )\n paths.extend(subpaths)\n\n return paths", "def join(self, path, *paths):", "def add_path(self, path):\n\n for i in range(1, len(path)):\n self.add_edge(path[i], path[i - 1])", "def sub_link_capacity(self, path, bw):\n \n # PART 1, TASK 3.4 sub bw to edges", "def subgraph(self, nodes, relabel_nodes=False, output_device=None):\n raise NotImplementedError(\"subgraph is not implemented yet\")", "def multi_join(paths, *path_segments):\n return [os.path.join(*(path_segments + (path,))) for path in paths]", "def compute_longest_syntactic_path(self, add_cut):\n max_cost, maxpath_node_uids, subgraph_node_uids, subgraph_edge_uids = self._compute_longest_path_cut(self._start_uid, self._end_uid)\n cut_uid = Cut.get_cut_uid(self._start_uid, self._end_uid)\n if add_cut and cut_uid not in self._cuts.keys():\n c = Cut(self._start_uid, self._end_uid, max_cost, subgraph_node_uids, subgraph_edge_uids, self)\n self._cuts[c.get_uid()] = c\n return max_cost, maxpath_node_uids, subgraph_node_uids, subgraph_edge_uids", "def build_path(cask_node, nodes):\n if cask_node.parent.name != 'ABC':\n nodes.insert(0, cask_node.parent.name)\n build_path(cask_node.parent, nodes)\n return nodes", "def drawPath(self, path=[]):\n subpath = NSBezierPath.alloc().init()\n subpath.moveToPoint_(path[0][0])\n for p in path[1:]:\n if len(p) == 3:\n # curve\n A, B, C = p\n subpath.curveToPoint_controlPoint1_controlPoint2_(C, A, B)\n else:\n subpath.lineToPoint_(p[0])\n\n subpath.closePath()\n NSColor.colorWithCalibratedRed_green_blue_alpha_(\n 0, 0, 1, self.alpha\n ).set()\n subpath.stroke()", "def append_paths(main_paths, paths):\n\tpaths = {key: np.vstack((main_paths[key], paths[key])) for key in main_paths.keys()}\n\treturn paths", "def path_to(self, other: \"BaseSegment\") -> List[PathStep]:\n # Return empty if they are the same segment.\n if self is other:\n return [] # pragma: no cover\n\n # Do we have any child segments at all?\n if not self.segments:\n return []\n\n # Identifying the highest parent we can using any preset parent values.\n midpoint = other\n lower_path = []\n while True:\n _higher = midpoint.get_parent()\n # If we've run out of parents, stop for now.\n if not _higher:\n break\n lower_path.append(\n PathStep(\n _higher,\n _higher.segments.index(midpoint),\n len(_higher.segments),\n _higher._code_indices,\n )\n )\n midpoint = _higher\n # If we're found the target segment we can also stop.\n if midpoint == self:\n break\n\n # Reverse the path so far\n lower_path.reverse()\n\n # Have we already found the parent?\n if midpoint == self:\n return lower_path\n # Have we gone all the way up to the file segment?\n elif midpoint.class_is_type(\"file\"):\n return [] # pragma: no cover\n # Are we in the right ballpark?\n # NOTE: Comparisons have a higher precedence than `not`.\n elif not self.get_start_loc() <= midpoint.get_start_loc() <= self.get_end_loc():\n return []\n\n # From here, 
we've worked \"up\" as far as we can, we now work \"down\".\n # When working down, we only need to go as far as the `midpoint`.\n\n # Check through each of the child segments\n for idx, seg in enumerate(self.segments):\n # Set the parent if it's not already set.\n seg.set_parent(self)\n # Build the step.\n step = PathStep(self, idx, len(self.segments), self._code_indices)\n # Have we found the target?\n # NOTE: Check for _equality_ not _identity_ here as that's most reliable.\n if seg == midpoint:\n return [step] + lower_path\n # Is there a path to the target?\n res = seg.path_to(midpoint)\n if res:\n return [step] + res + lower_path\n\n # Not found.\n return [] # pragma: no cover", "def shorter_path(start, goal):\n if start == goal:\n return [start]\n explored = set() \n queue = [ [start] ] \n while queue:\n path = queue.pop(0)\n s = path[-1]\n for state, action in bj_subway[s].items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if state == goal:\n\t\t\t\t\t# print path2\n\t\t\t\t\t# for x in queue:\n\t\t\t\t\t# print x\n\t\t\t\t\treturn path2\n else:\n queue.append(path2)\n return []", "def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path", "def select_paths(self, dpaths=None):\r\n \r\n locs = [] # Find list of starting locs\r\n if len(dpaths) == 0:\r\n return [], []\r\n \r\n for dpath in dpaths:\r\n locs.append(dpath.path[0]) # Get starting loc\r\n \r\n start_locs = self.get_starts(locs=locs)\r\n start_paths = [] # Get paths with these starting locs\r\n other_paths = []\r\n for dpath in dpaths:\r\n if dpath.path[0] in start_locs:\r\n start_paths.append(dpath)\r\n else:\r\n other_paths.append(dpath)\r\n last = self.get_val(\"last\")\r\n if last is 0 or last == \"\":\r\n last = len(dpaths)\r\n closed_tour = self.get_val(\"closed_tour\")\r\n not_tour = self.get_val(\"not_tour\")\r\n comp = self.get_val(\"comp\") \r\n not_comp = self.get_val(\"not_comp\") \r\n\r\n arr_list = start_paths\r\n other_list = other_paths\r\n if closed_tour or not_tour:\r\n a_list = []\r\n o_list = []\r\n for ad in arr_list:\r\n used = False\r\n is_tour = ad.is_closed_tour\r\n if closed_tour:\r\n if is_tour:\r\n a_list.append(ad)\r\n used = True\r\n if not_tour:\r\n if not is_tour:\r\n a_list.append(ad)\r\n used = True\r\n if not used:\r\n o_list.append(ad)\r\n arr_list = a_list\r\n other_list += o_list \r\n \r\n if comp or not_comp:\r\n a_list = []\r\n o_list = []\r\n for ad in arr_list:\r\n used = False\r\n is_comp = ad.is_complete_tour\r\n if comp:\r\n if is_comp:\r\n a_list.append(ad)\r\n used = True\r\n if not_comp:\r\n if not is_comp:\r\n a_list.append(ad)\r\n used = True\r\n if not used:\r\n o_list.append(ad)\r\n arr_list = a_list\r\n other_list += o_list\r\n self.prev_arr_list = arr_list \r\n return arr_list, other_list", "def manage_paths(node, paths) :\r\n\r\n #Getting the nodes neighbouring the given node\r\n neighbours = get_neighbouring_nodes(node) \r\n\r\n #Creating a new path branch\r\n new_path = [] #The new path\r\n path_found = False #Indicates whether the path to which the node belongs has been found\r\n\r\n #Looping through the neighbours\r\n for neighbour in neighbours :\r\n for 
path in paths :\r\n #Checking whether the path contains the neighbour\r\n if(neighbour in path) :\r\n index = path.index(neighbour)\r\n #Checking if the branch belongs to the current path\r\n if(path[index].gn_value == neighbour.gn_value) :\r\n new_path = path[:index + 1] + [node] #Creating a new path branch\r\n new_path[-1].gn_value = new_path.__len__() - 1 #Updating the node's g(n) value\r\n path_found = True\r\n break\r\n if(path_found) :\r\n break\r\n \r\n if(not path_found) :\r\n raise Exception(\"No branch junction found\")\r\n\r\n #Setting the new path as the current path\r\n return new_path", "def join_path(self, path_parts):\n return os.path.sep.join(path_parts)", "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def enable_sub_path(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_sub_path\")", "def select_path(self, pathlen):\r\n self.entry_gen.rewind()\r\n self.mid_gen.rewind()\r\n self.exit_gen.rewind()\r\n entry = self.entry_gen.generate()\r\n mid = self.mid_gen.generate()\r\n ext = self.exit_gen.generate()\r\n \r\n plog(\"DEBUG\", \"Selecting path..\")\r\n\r\n while True:\r\n path = []\r\n plog(\"DEBUG\", \"Building path..\")\r\n try:\r\n if pathlen == 1:\r\n path = [ext.next()]\r\n else:\r\n path.append(entry.next())\r\n for i in xrange(1, pathlen-1):\r\n path.append(mid.next())\r\n path.append(ext.next())\r\n if self.path_restrict.path_is_ok(path):\r\n self.entry_gen.mark_chosen(path[0])\r\n for i in xrange(1, pathlen-1):\r\n self.mid_gen.mark_chosen(path[i])\r\n self.exit_gen.mark_chosen(path[pathlen-1])\r\n plog(\"DEBUG\", \"Marked path.\")\r\n break\r\n else:\r\n plog(\"DEBUG\", \"Path rejected by path restrictions.\")\r\n except StopIteration:\r\n plog(\"NOTICE\", \"Ran out of routers during buildpath..\");\r\n self.entry_gen.rewind()\r\n self.mid_gen.rewind()\r\n self.exit_gen.rewind()\r\n entry = self.entry_gen.generate()\r\n mid = self.mid_gen.generate()\r\n ext = self.exit_gen.generate()\r\n for r in path:\r\n r.refcount += 1\r\n plog(\"DEBUG\", \"Circ refcount \"+str(r.refcount)+\" for \"+r.idhex)\r\n return path", "def reconstruct_path(current):\r\n path = [current.coord]\r\n parent = current.parent\r\n while parent:\r\n path = [parent.coord] + path\r\n parent = parent.parent\r\n path = path[1:]\r\n return path", "def subpath_sim(subpaths_1, subpaths_2):\n u = subpaths_1.union(subpaths_2)\n f1 = np.zeros(len(u))\n f2 = np.zeros(len(u))\n u = list(u)\n\n # convert graph into 
one-hot-vector (based on the precense of subpaths)\n for i in range(len(u)):\n if u[i] in subpaths_1:\n f1[i] = 1\n if u[i] in subpaths_2:\n f2[i] = 1\n\n score = np.dot(f1, f2) * (np.count_nonzero(f1) + np.count_nonzero(f2)) / (2 * (np.count_nonzero(f1) * np.count_nonzero(f2)))\n\n if math.isnan(score): # in case of empty set\n return 0.0\n else:\n return score", "def decompose_paths(self):\n if self.child_nodes == {}:\n return []\n\n import numpy as np\n\n def decompose_paths_rec(node_inner, path):\n \"\"\"\n This function does the recursive create_path of the decomposition\n :param node_inner:\n :param path:\n \"\"\"\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths\n\n decomposition = decompose_paths_rec(self, np.array([]))\n return decomposition.reshape((decomposition.shape[0]/(self.d+1), self.d+1))", "def _all_paths_iterator(self, vertex, ending_vertices=None,\n simple=False, max_length=None, trivial=False):\n if ending_vertices is None:\n ending_vertices = self\n if max_length is None:\n from sage.rings.infinity import Infinity\n max_length = Infinity\n if max_length < 1:\n return\n\n # Start with the empty path; we will try all extensions of it\n queue = []\n path = [vertex]\n\n if trivial and vertex in ending_vertices:\n yield path\n while True:\n # Build next generation of paths, one arc longer; max_length refers\n # to edges and not vertices, hence <= and not <\n if len(path) <= max_length:\n\n # We try all possible extensions\n if simple:\n # We only keep simple extensions. An extension is simple\n # iff the new vertex being entered has not previously\n # occurred in the path, or has occurred but only been\n # exited (i.e. is the first vertex in the path). In this\n # latter case we must not exit the new vertex again, so we\n # do not consider it for further extension, but just yield\n # it immediately. 
See trac #12385.\n for neighbor in self.neighbor_out_iterator(path[-1]):\n if neighbor not in path:\n queue.append(path + [neighbor])\n elif ( neighbor == path[0] and\n neighbor in ending_vertices ):\n yield path + [neighbor]\n\n else:\n # Non-simple paths requested: we add all of them\n for neighbor in self.neighbor_out_iterator(path[-1]):\n queue.append(path + [neighbor])\n\n if not queue:\n break\n path = queue.pop(0) # get the next path\n\n if path[-1] in ending_vertices:\n yield path # yield good path", "def concatenate(paths):\n # if only one path object just return copy\n if len(paths) == 1:\n return paths[0].copy()\n\n # length of vertex arrays\n vert_len = np.array([len(i.vertices) for i in paths])\n # how much to offset each paths vertex indices by\n offsets = np.append(0.0, np.cumsum(vert_len))[:-1].astype(np.int64)\n\n # resulting entities\n entities = []\n # resulting vertices\n vertices = []\n # resulting metadata\n metadata = {}\n for path, offset in zip(paths, offsets):\n # update metadata\n metadata.update(path.metadata)\n # copy vertices, we will stack later\n vertices.append(path.vertices.copy())\n # copy entity then reindex points\n for entity in path.entities:\n entities.append(entity.copy())\n entities[-1].points += offset\n\n # generate the single new concatenated path\n # use input types so we don't have circular imports\n concat = type(path)(metadata=metadata,\n entities=entities,\n vertices=np.vstack(vertices))\n return concat", "def get_path_endpoints(self):\n endpoints = []\n\n # Get the far end of the last path segment\n path, split_ends, position_stack = self.trace()\n endpoint = path[-1][2]\n if split_ends is not None:\n for termination in split_ends:\n endpoints.extend(termination.get_path_endpoints())\n elif endpoint is not None:\n endpoints.append(endpoint)\n\n return endpoints", "def reconstruct_path_to_destination(prev, end):\n path = [end]\n curr = end\n while curr in prev.keys():\n curr = prev[curr]\n path.insert(0, curr)\n return path", "def reconstruct_path_to_destination(prev, end):\n path = [end]\n curr = end\n while curr in prev.keys():\n curr = prev[curr]\n path.insert(0, curr)\n return path", "def _backtrack_path(self, current=None):\n path = []\n if self.path_found:\n if not current:\n current = self.end_node\n path.append(self.end_node)\n while current.parent:\n path.append(current.parent)\n self.board_array[current.parent.y][current.parent.x] = 4\n current = current.parent\n self.board_array[self.start_node.y][self.start_node.x] = 2\n self.board_array[self.end_node.y][self.end_node.x] = 3\n return path", "def _subgraph(self):\n return {\n 'type': 'SubgraphTask',\n 'id': 0,\n 'dependencies': [],\n 'parameters': {\n 'info': {},\n 'current_retries': 0,\n 'send_task_events': False,\n 'containing_subgraph': None,\n 'task_kwargs': {}\n }\n }", "def join_path(tuple_path):\n return os.path.join(tuple_path[1], tuple_path[1] + tuple_path[2])", "def path_add_subreddit(self, subreddit):\r\n if not self.path_has_subreddit() and subreddit.path != '/categories/':\r\n self.path = (subreddit.path + self.path)\r\n return self", "def coalesce_paths(self, child_paths, t0):\n assert len(child_paths)==2, 'Can only coalesce 2 pathogen lineages at a time'\n p1, p2 = child_paths\n\n assert p1 in self.extant_p and p2 in self.extant_p, 'Both pathogen lineages must be extant'\n assert p1.host == p2.host, 'Can only coalesce pathogen lineages in the same host'\n host = p1.host\n\n assert p1.height < t0 and p2.height < t0, \\\n 'Pathogen lineage heights %f %f cannot exceed 
coalescent event %f' % (p1.height, p2.height, t0)\n\n # create new pathogen lineage\n new_path = TreeNode(name='_'.join([x.name for x in child_paths]), dist=0)\n new_path.add_features(host=host, height=t0)\n\n # cast child_paths as a List because ete3.Tree.children requires it\n new_path.children = list(child_paths)\n self.extant_p.append(new_path)\n\n # coalesced pathogen lineages are no longer extant\n for node in child_paths:\n node.up = new_path\n node.dist = t0 - node.height # when node was created, we stored the height\n self.extant_p.remove(node)\n self.not_extant_p.append(node)\n\n return new_path", "def possible_subpeptides(self):\n ret = [\"\"]\n protein_len = len(self.protein)\n for l in range(1, protein_len):\n for i in range(protein_len):\n if i + l <= protein_len:\n ret += [self.protein[i : i+l]]\n else:\n ret += [self.protein[i:] + self.protein[:(i+l)%protein_len]]\n ret += [self.protein]\n return ret", "def __combine_path(self, other):\n self.path = other.path + self.path", "def getPaths(self):\n\n trafficEndPoints = []\n # A job denotes a traffic flow, which corresponds to an iperf task.\n for job in self.config.trace.jobs:\n trafficEndPoints.append((job['src'], job['dst']))\n\n # Obtain details about user-specified non-default links.\n configuredLinks = []\n for linkInfo in self.config.topoData['linkInfos']:\n configuredLinks.append((linkInfo['src'], linkInfo['dst']))\n\n paths = None\n spec = self.config.topoData['flowSpec']\n if spec == 'shortest_path':\n # export paths info and create routing conf using shortest paths\n adjFile = self.config.adjacencyFile\n writeAdjList(self.net, adjFile)\n info(\"**** [G2]: adjacency list written to file\", adjFile, \"\\n\")\n\n outfile = os.path.join(self.config.outPath, SHORTEST_PATH_FILE)\n paths = generateShortestPaths(adjFile, outfile, trafficEndPoints, configuredLinks)\n info(\"**** [G2]: shortest paths written to file\", outfile, \"\\n\")\n # Note: Since there can be multiple shortest paths between two endpoints, solution could vary.\n elif \".json\" in spec:\n info(\"**** [G2]: reading path info from\", spec, \"\\n\")\n paths = readFromPathFile(spec)\n else:\n paths = None\n return paths", "def sub_pod_path(self):\n return self.pod_path[len(self.source_path):]", "def compute_path(self,\n path):\n self.subidx = ~np.isnan(self.y_data)\n leaf_id = 0\n for decision in path:\n node = self.tree[leaf_id]\n if decision == 0:\n leaf_id = node.id_null\n self.subidx = (self.subidx &\n np.isnan(self.split_data[node.label]))\n else:\n non_null = ~np.isnan(self.split_data[node.label])\n new_subidx = np.zeros_like(self.split_data[node.label],\n dtype=bool)\n if decision < 0:\n leaf_id = node.id_lower\n new_subidx[non_null] = \\\n self.split_data[node.label][non_null] < node.cutoff\n elif decision > 0:\n leaf_id = node.id_higher\n new_subidx[non_null] = \\\n self.split_data[node.label][non_null] > node.cutoff\n self.subidx = self.subidx & new_subidx\n\n self.sub_y_data = self.y_data[self.subidx]\n self.sub_split_data = self.split_data[:, self.subidx]\n self.sub_bin_data = self.bin_data[self.subidx]\n self.sub_y_ranks = rankdata(self.sub_y_data)\n self.sub_split_args = self.sub_split_data.argsort(axis=1)", "def combine_paths(paths: Iterable[str], prepend: str, separator: str) -> str:\n\n paths = [\"{}{}\".format(prepend, p) for p in paths]\n return separator.join(paths)", "def test__extend_paths():\n file_paths = [\"docs/abcd/\", \"docs/123/\"]\n\n extend_paths = classifier_module.Classifier._extend_paths\n path_element = \"u/\"\n 
extended_paths = extend_paths(file_paths, path_element)\n\n assert len(file_paths) == len(extended_paths)\n for path_num in range(len(file_paths)):\n assert file_paths[path_num] + path_element == extended_paths[path_num]", "def url_subpath(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n forbidden = ''.join(sorted(forbidden))\n raise ValueError('%(s)r contains forbidden characters'\n ' (%(forbidden)r)'\n % locals())\n stripped = normpath(s).lstrip(sep)\n if stripped == curdir:\n return ''\n if sep != '/':\n return stripped.replace(sep, '/')\n return stripped", "def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths", "def joinPath(path, *args):", "def pathTo(self, v): # O(# edges returned)\n if self.hasNegativeCycle():\n raise Exception(\"Negative cost cycle exists\")\n if not self.hasPathTo(v): return None\n path = [] # new Stack<DirectedEdge>()\n e = self._edgeTo[v]\n while e is not None: \n path.append(e) # push(e)\n e = self._edgeTo[e.get_from()]\n return path", "def _subtour(edges,n):\n visited = [False]*n\n cycles = []\n costs = []\n selected = [[] for i in range(n)]\n for x,y in edges:\n selected[x].append(y)\n while True:\n current = visited.index(False)\n thiscycle = [current]\n while True:\n visited[current] = True\n neighbors = [x for x in selected[current] if not visited[x]]\n if len(neighbors) == 0:\n break\n current = neighbors[0]\n thiscycle.append(current)\n cycles.append(thiscycle)\n costs.append(len(thiscycle))\n if sum(costs) == n:\n break\n return cycles[costs.index(min(costs))]", "def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths", "def __path_to_end(self) -> List[List[int]]:\n predecessors = self.__predecessors_list()\n path = []\n\n row_exit, col_exit = Player.find_exit_position(self.__labyrinth)\n dest = self.__convert_position(row_exit, col_exit)\n\n v = dest\n\n path.append([v // 10, v % 10])\n\n while predecessors[v] != -1:\n path.append(predecessors[v])\n v = self.__convert_position(predecessors[v][0], predecessors[v][1])\n\n return path[::-1]", "def a_path(t,x):\n if label(t) == x:\n return [x]\n\n for branch in branches(t):\n rest_of_path = a_path(b,x)\n if rest_of_path:\n return [label(t)] + rest_of_path", "def CloseSubpath(*args, **kwargs):\n return _gdi_.GraphicsPath_CloseSubpath(*args, **kwargs)", "def _submodel_path(self, i):\n template = self.config['submodel_relpath_template']\n return os.path.join(self.data_path, template % i)", "def addPath(self, from_node, to_node):\n x1,y1 = from_node.x,from_node.y\n x2,y2 = to_node.x,to_node.y\n \n pointsx = []\n pointsy = []\n \n\n m_new = 2 * (y2 - y1)\n slope_error_new = m_new - (x2 - x1)\n \n y=y1\n for x in range(x1,x2+1):\n \n pointsx.append(x)\n pointsy.append(y)\n # Add slope to increment angle formed\n slope_error_new =slope_error_new + m_new\n \n # Slope error reached limit, time to\n # increment y and update slope error.\n if (slope_error_new >= 0):\n y=y+1\n slope_error_new =slope_error_new - 2 * (x2 - x1)\n\n new_node = self.Node(to_node.x,to_node.y)\n new_node.path_x = pointsx\n new_node.path_y = pointsy\n new_node.path_x.append(to_node.x)\n new_node.path_y.append(to_node.y)\n\n print(\"len path 
x\",len(new_node.path_x))\n print(\"len path y\",len(new_node.path_y) )\n\n new_node.parent = from_node\n\n return new_node", "def get_subfiles(self) -> Set[str]:\n\n\t\t# Return\n\t\tself._update_subfiles()\n\t\treturn self.subfiles", "def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = (prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if 
matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]", "def merge_paths(self, paths):\n merged_path = sum(paths, [])\n if len(merged_path) != len(set(merged_path)):\n raise PathGraphException(\"Path contains repeated elements. Can't add path\")\n\n # delete paths to merge\n for path in paths:\n self.delete_path_containing_node(path[0])\n\n self.add_path(merged_path)", "def combine(self, action):\n next_node = Path(action.end, self.path_cost + action.cost, parent=self)\n return next_node", "def get_subfiles(self) -> Set[str]:\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tself.subfiles.update(node.get_subfiles())\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tself.subfiles.update(subnode.filenames)\n\t\t# Return\n\t\treturn self.subfiles", "def generate_final_course(self, goal_index):\n path = [[self.end.x, self.end.y]]\n node = self.node_list[goal_index]\n while node.parent is not None:\n path.append([node.x, node.y]) # Adding nodes to the path\n node = node.parent\n path.append([node.x, node.y])\n\n return path", "def join(path, *paths: str) -> str:\n pass", "def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return list(reversed(return_path))", "def XCAFDoc_ShapeTool_GetSubShapes(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetSubShapes(*args)", "def get_path(self) :\n path = [self]\n s = self.get_parent()\n while s is not None :\n path.append(s)\n s = s.get_parent()\n path.reverse()\n return path", "def all_paths(self, node, destination, dist, path):\n\n d=self.dict()\n p=[]\n for i in range(len(path)):\n p.append(path[i])\n p.insert(len(p),node)\n \n if len(p)-1==dist:\n if node==destination:\n return p\n else:\n return None\n\n my_paths=[]\n\n for a in d[node]:\n if a not in p:\n p1=self.all_paths(a,destination,dist,p)\n\n if p1!=None:\n if isinstance(p1[0],list):\n for i in range(len(p1)):\n my_paths.append(p1[i])\n else:\n my_paths.append(p1)\n\n if len(my_paths)!=0:\n return my_paths\n else:\n return None", "def floyd_warshall_path(self, start, end, next_node): # pragma no cover\n if next_node[start][end] is None:\n return []\n path = [start]\n while start is not end:\n start = next_node[start][end]\n path.append(start)\n return path", "def get_final_key_paths(\n obj: Union[dict, list, tuple], cur_path: str = '',\n append_values: bool = False,\n paths: list = None, black_list: list = None,\n final_keys_only: bool = False):\n if paths is None:\n paths = []\n\n if isinstance(obj, (dict, list, tuple)):\n if isinstance(obj, dict):\n for key in obj:\n new_path = cur_path + f'[\\'{key}\\']'\n if isinstance(obj[key], dict):\n if black_list is not None and key in black_list:\n continue\n get_final_key_paths(\n obj[key], new_path, append_values, paths, black_list,\n final_keys_only)\n elif isinstance(obj[key], (list, tuple)):\n 
get_final_key_paths(\n obj[key], new_path, append_values, paths, black_list,\n final_keys_only)\n else:\n if final_keys_only:\n last_bracket = new_path.rfind('[\\'')\n new_path = new_path[\n last_bracket+2:new_path.rfind('\\'')]\n if append_values:\n to_append = [new_path, obj[key]]\n else:\n to_append = new_path\n paths.append(to_append)\n else:\n key_added = False\n for i in range(len(obj)):\n if isinstance(obj[i], (dict, tuple, list)):\n get_final_key_paths(\n obj[i], cur_path + f'[{i}]', append_values,\n paths, black_list, final_keys_only)\n else:\n if not key_added:\n if final_keys_only:\n last_bracket = cur_path.rfind('[\\'')\n cur_path = cur_path[\n last_bracket+2:cur_path.rfind('\\'')]\n if append_values:\n to_append = [cur_path, obj]\n else:\n to_append = cur_path\n paths.append(to_append)\n key_added = True\n\n return paths", "def syntactic_path(cand, expr, sent, paths=False):\n agg_path = u''\n if not paths:\n dist, predec = getpaths_sent(getgraph_sent(sent))\n else:\n dist, predec = paths\n # ↑ \n # ↓\n i = i1 = cand - 1\n i2 = expr -1\n while i != i2:\n if predec[i2, i]+1 == int(sent[i]['head']):\n agg_path += sent[i]['deprel'] #unicode(i)\n agg_path += u\"↑\"\n elif predec[i2, i]+1 in sent[i]['daughters']:\n agg_path += sent[predec[i2, i]]['deprel'] #unicode(i)\n agg_path += u\"↓\"\n else:\n return \"none\"\n print \"FEIL - ingen path funnet\"\n i = predec[i2, i]\n return agg_path", "def navigate_to_subpath(self, child):\n raise NotImplementedError()", "def path(self, source, target):\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]", "def path(self, source, target):\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]", "def get_shortest_paths_from_trans(original_trans, trans, spaths, visited_arcs, visited_transitions, added_elements,\r\n rec_depth):\r\n for out_arc in trans.out_arcs:\r\n if out_arc not in visited_arcs:\r\n visited_arcs.add(out_arc)\r\n target_place = out_arc.target\r\n for place_out_arc in target_place.out_arcs:\r\n if place_out_arc not in visited_arcs:\r\n visited_arcs.add(place_out_arc)\r\n target_trans = place_out_arc.target\r\n if target_trans not in visited_transitions:\r\n visited_transitions.add(target_trans)\r\n if target_trans.label:\r\n el1 = ((original_trans.name, target_trans.name), 0, rec_depth)\r\n if out_arc not in spaths:\r\n spaths[out_arc] = set()\r\n spaths[out_arc].add(el1)\r\n added_elements.add(el1)\r\n el2 = ((original_trans.name, target_trans.name), 1, rec_depth)\r\n if place_out_arc not in spaths:\r\n spaths[place_out_arc] = set()\r\n spaths[place_out_arc].add(el2)\r\n added_elements.add(el2)\r\n else:\r\n spaths, visited_arcs, visited_transitions, added_elements = get_shortest_paths_from_trans(\r\n original_trans,\r\n target_trans, spaths,\r\n visited_arcs,\r\n visited_transitions,\r\n added_elements,\r\n rec_depth + 1)\r\n for element in added_elements:\r\n new_element = list(element)\r\n if new_element[1] == 0:\r\n new_element[1] = 2\r\n if out_arc not in spaths:\r\n spaths[out_arc] = set()\r\n spaths[out_arc].add(tuple(new_element))\r\n if new_element[1] == 1:\r\n new_element[1] = 3\r\n if place_out_arc not in spaths:\r\n spaths[place_out_arc] = set()\r\n spaths[place_out_arc].add(tuple(new_element))\r\n return spaths, visited_arcs, visited_transitions, 
added_elements", "def get_extended_by(self, edge):\n return Path(self, edge)", "def GetSubShapes(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetSubShapes(*args)", "def expand_paths(self, paths):\n \n expanded_paths = []\n if isinstance(paths, str): # A single path\n expanded = glob.glob(paths)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n elif isinstance(paths, list): # Multiple path\n for p in paths:\n expanded = glob.glob(p)\n for e in expanded:\n expanded_paths.append(os.path.abspath(e))\n else:\n _LOG.exception(\"Unknown input for the 'add' function.\")\n return expanded_paths", "def constructShortestPath(self):", "def get_subgraph(self, node,subgraph=None):\n\t\tif subgraph == None:\n\t\t\tsubgraph = nx.DiGraph(database=self.database, server=self.server, root=node)\n\t\tfor edge in self.DiG.out_edges(node):\n\t\t\t# subgraph.add_node(edge[1],{'alias':get_alias(edge[1])})\n\t\t\t# if edge[0] in subgraph:\n\t\t\t# \tprint('child node: {} already in subgraph'.format(edge[0]))\n\t\t\t# if edge[1] in subgraph:\n\t\t\t# \tprint('parent node: {} already in subgraph'.format(edge[1]))\n\t\t\t# get_alias(edge[0])\n\t\t\tsubgraph.add_edge(node, edge[1],{'Column':self.DiG.get_edge_data(*edge)['Column']})\n\t\t\tself.get_subgraph(edge[1],subgraph)\n\t\t# for node in subgraph:\n\t\t# \tsubgraph[node]['alias'] = get_alias(node)\n\t\treturn subgraph", "def paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree, return_indices=return_indices)\n return paths", "def add_paths(self, resource, paths):\n for path in sorted(paths):\n\n if isinstance(path, six.text_type):\n webPath = path.encode('utf8')\n else:\n webPath = path\n\n if path != b\"/\":\n resource.putChild(webPath, self.create_resource(paths[path]))", "def find_all_paths(parents_to_children, start, end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return [path]\r\n if start not in parents_to_children.keys():\r\n return []\r\n paths = []\r\n for node in parents_to_children[start]:\r\n if node not in path:\r\n newpaths = find_all_paths(parents_to_children, node, end, path)\r\n for newpath in newpaths:\r\n paths.append(tuple(newpath))\r\n return paths", "def join_create_path(base_path, sub_dir):\n new_path = os.path.join(base_path, sub_dir)\n if not os.path.exists(new_path):\n os.mkdir(new_path)\n return new_path" ]
[ "0.59228057", "0.5917371", "0.5893389", "0.5885784", "0.58473676", "0.57791805", "0.5497774", "0.54971564", "0.5475109", "0.5418675", "0.5359241", "0.5319792", "0.5247005", "0.51880467", "0.51860625", "0.516922", "0.51333255", "0.5104041", "0.5086994", "0.5030389", "0.5015719", "0.4999733", "0.49690285", "0.49557117", "0.49517438", "0.49373305", "0.49032763", "0.48745564", "0.483924", "0.4820145", "0.48164785", "0.48056823", "0.4803772", "0.47894055", "0.47832268", "0.47811568", "0.47662148", "0.47504115", "0.47494397", "0.47425687", "0.4728896", "0.4713412", "0.47130257", "0.47108516", "0.47082642", "0.4702166", "0.46999258", "0.46907747", "0.4678616", "0.4678616", "0.46691585", "0.46517563", "0.46501645", "0.4633049", "0.4630741", "0.46285918", "0.46216536", "0.46179867", "0.46170494", "0.4615632", "0.46145236", "0.4612778", "0.46118122", "0.46074882", "0.46019325", "0.45982233", "0.4597534", "0.45906302", "0.45837563", "0.45830405", "0.45829523", "0.4575701", "0.45729387", "0.45674336", "0.45643994", "0.4560855", "0.45595354", "0.4557983", "0.45493826", "0.454078", "0.45402673", "0.45393574", "0.45387477", "0.45349273", "0.45344412", "0.45303893", "0.45237187", "0.45224488", "0.4506561", "0.4506561", "0.45024118", "0.4501104", "0.44951206", "0.44917595", "0.4485298", "0.44846445", "0.44689864", "0.44681674", "0.44674146", "0.44606796" ]
0.65290594
0
find nodes that are reachable from the top and count the number of outgoing edges for each node
def find_reachable_nodes(self): # find all reachable nodes down from the goal found = {} found[id(self.root)] = self.root queue = [self.root] #print >>sys.stderr, '---' while queue: node = queue.pop(0) if hasattr(node, 'dead'): if node.dead: #print >>sys.stderr, 'dead', node continue assert not node.dead for edge in node.incoming: for tailnode in edge.tail: #print >>sys.stderr, tailnode if id(tailnode) not in found: found[id(tailnode)] = tailnode queue.append(tailnode) tailnode.nout = 0 tailnode.nout += 1 # save for sanity check self.found = found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_edges(self):\n return self._top_exp.number_of_edges()", "def guess_num_nodes_from(edgelist):\n return np.max(edgelist) + 1", "def count(self):\r\n return self.count_helper(self.top_node)", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2", "def Nedges(self):\n return len(self.edges)", "def sum_edges(g, source, hops):\n\n edges = 0\n\n paths = nx.single_source_shortest_path(g, source, hops)\n for node in paths.iterkeys():\n edges += len(g.neighbors(node))\n\n return edges", "def nodes_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [len(set(node for edge in graph.get_edges() for node in edge.get_incident_nodes())) for graph in graphs]", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def count(self):\n\t\treturn len(list(self.nodes))", "def _number_of_edges(self):\n if self._edges is None:\n return 0\n return len(self._edges)", "def num_edges(self):\r\n return len(self.__generate_edges())", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def num_edges(self):\n return sum(1 for _ in self.iteredges())", "def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))", "def sort_nodes_by_n_reachable(graph):\n list_of_node_and_reachables_tups = [] # stores the number of reachable nodes per node\n # The following for loop finds the number of reachable nodes per node\n for node_to_test in graph.nodes:\n n_reachable = 0\n # The following for loop checks each node if it is reachable from node_to_test. If so, adds to the counter\n for node_is_reachable in graph.nodes:\n if graph.is_reachable(node_to_test, node_is_reachable) and node_to_test != node_is_reachable:\n n_reachable += 1\n # Adds a tuple with the node_to_test and the counter of reachable nodes\n list_of_node_and_reachables_tups.append((node_to_test, n_reachable))\n # At this point we have a list with tuples including the node name and its reachables. 
Now need to sort them\n sorted_nodes_by_reachable = sorted(list_of_node_and_reachables_tups, key=lambda tup: tup[1], reverse=True)\n return sorted_nodes_by_reachable", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def count_unvisited(data):\n count = sum(n.count(\"n\") for n in data)\n return count", "def edge_count(self):\n total = sum(len(self._outgoing[v]) for v in self._outgoing)\n # for undirected graphs, make sure not to double-count edges\n return total if self.is_directed() else total // 2", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def starting_nodes(self):\n # Level 0 nodes in a directed graph will have 1 or more out_edges but no in_edges\n nodes_with_outs = set(e[0] for e in self.G2.out_edges())\n nodes_with_ins = set(e[1] for e in self.G2.in_edges())\n return nodes_with_outs - nodes_with_ins", "def find_nodes(shp):\n node_count = {}\n for road in shp:\n vrts = road.vertices\n for node in vrts:\n if node not in node_count:\n node_count[node] = 0\n node_count[node] += 1\n node_count[vrts[0]] += 1\n node_count[vrts[-1]] += 1\n return set([node for node,c in node_count.iteritems() if c > 1])", "def bfs_counting(graph, root_vertex, bottom_vertex): # perform analysis twice: 1) set root_vertex = 't'; 2) set root_vertex = 's'\r\n\r\n #visited = dict()\r\n nd_list = graph.keys()\r\n visited = dict((node, 0) for node in nd_list)\r\n visited[bottom_vertex]=0\r\n\r\n nq = deque()# queue for recording current nodes\r\n pre_dict, dist, parents, node_count_dict = {}, {}, {}, {}\r\n \r\n nq.append(root_vertex)\r\n visited[root_vertex]=1\r\n dist[root_vertex] = 0\r\n parents[root_vertex]=['fake_root']\r\n node_count_dict['fake_root']=1\r\n while nq:\r\n s = nq.popleft() # dequeue\r\n \r\n node_count_dict[s] = 0\r\n for p in parents[s]: # count is defined as the sum of counts from all parents\r\n node_count_dict[s] += node_count_dict[p]\r\n\r\n #for node in self.__graph_dict[s]: # for each child/neighbour of current node 's'\r\n if not s in graph.keys():\r\n continue\r\n for node in graph[s]:\r\n\r\n #if not node in visited:\r\n if not visited[node]:\r\n nq.append(node) # let 'node' in queue\r\n pre_dict[node] = [s] # the 'parent' (in terms of shortest path from 'root') of 'node' is 's'\r\n dist[node] = dist[s] + 1 # shortest path to 'root'\r\n visited[node]=1 # 'node' is visted\r\n parents[node]=[s] # record 'parents' of this node\r\n else:\r\n parents[node].append(s) # record 'parents' of this node\r\n pre_dict[node].append(s)\r\n \r\n node_count_dict.pop('fake_root')\r\n return [pre_dict, node_count_dict] # two returns: 1) tree; 2) node count dictionary\r", "def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n", "def get_reachable_nodes(self, source: Node) -> Set[Node]:\n nodes_found: Set[Node] = {source}\n queue = [source]\n while queue:\n v = queue.pop()\n for e in v.outgoing_edges:\n if e.node_to not in nodes_found:\n nodes_found.add(e.node_to)\n queue.append(e.node_to)\n return nodes_found", "def topo_sort(self):\n # TODO: detect cycles\n self.find_reachable_nodes()\n # save list of nodes in topo order\n self.nodes = []\n # assign each node an id field incrementally\n cur_id = 0\n # count visited outgoing edges for each node\n unvisited = {}\n for nid, node in list(self.found.items()):\n unvisited[nid] = 
node.nout\n queue = [self.root]\n #print >>sys.stderr, '+++'\n while queue:\n # take off nodes whose all outgoing edges are visited from\n # queue head\n node = queue.pop(0)\n self.nodes.append(node)\n node.hg = self\n node.id = cur_id\n cur_id += 1\n for edge in node.incoming:\n edge.hg = self\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n unvisited[id(tailnode)] -= 1\n if unvisited[id(tailnode)] == 0:\n queue.append(tailnode)\n self.sanity_check()\n self.tasks_done.add('topo_sort')", "def size(self):\n return (len(self.nodes), sum([len(x.outgoing_relations) for x in self.nodes.values()]))", "def cyclomaticComplexity (self):\n self.tarjan()\n return len(self.__edges) - len(self.__nodes) + 2 * len(self.__scc)", "def edge_count(self):\r\n return int(sum(self.degree(node) for node in range(self.size))/2)", "def get_visited_nodes(self):\n return self.visited_nodes", "def get_number_of_edges(self, where_to=OUTGOING):\n\n if not self._directed:\n return len(self._outgoing)\n\n if where_to == Vertex.OUTGOING:\n return len(self._outgoing)\n elif where_to == Vertex.INCOMING:\n return len(self._incoming)", "def _num_edges(self):\n return int(self._edge_map[-1])", "def count_streets_per_node(G, nodes=None):\n if nodes is None:\n nodes = G.nodes\n\n # get one copy of each self-loop edge, because bi-directional self-loops\n # appear twice in the undirected graph (u,v,0 and u,v,1 where u=v), but\n # one-way self-loops will appear only once\n Gu = G.to_undirected(reciprocal=False, as_view=True)\n self_loop_edges = set(nx.selfloop_edges(Gu))\n\n # get all non-self-loop undirected edges, including parallel edges\n non_self_loop_edges = [e for e in Gu.edges(keys=False) if e not in self_loop_edges]\n\n # make list of all unique edges including each parallel edge unless the\n # parallel edge is a self-loop, in which case we don't double-count it\n all_unique_edges = non_self_loop_edges + list(self_loop_edges)\n\n # flatten list of (u, v) edge tuples to count how often each node appears\n edges_flat = itertools.chain.from_iterable(all_unique_edges)\n counts = Counter(edges_flat)\n streets_per_node = {node: counts[node] for node in nodes}\n\n utils.log(\"Counted undirected street segments incident on each node\")\n return streets_per_node", "def Nnodes(self):\n return len(self.nodes)", "def number_of_nodes(self) -> int:\n return self.graph.number_of_nodes()", "def num_of_edge(self):\n try:\n return self.edges\n except:\n print(\"ERROR: No graph exists\")", "def ngraphs(self):\n return len(self.__graph_list)", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def num_edges(self):\n return sum(len(v.adj) for v in self.vertices.values())", "def calc_depths(self):\n graph = self._graph\n depth_cache = dict()\n\n for n in graph.topological_iter():\n preds = graph.predecessors(n)\n if not preds:\n depth_cache[n.op.key] = 0\n else:\n depth_cache[n.op.key] = 1 + max(depth_cache[ni.op.key] for ni in preds)\n return depth_cache", "def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])", "def count_dependencies(self, stack):\n return self.__graph.in_degree(stack)", "def reduce_inbound_connections(inbound_counts, nodes):\n nodes_without_inbound = []\n for node in nodes:\n inbound_counts[node] = 
inbound_counts[node] - 1\n if inbound_counts[node] == 0:\n nodes_without_inbound.append(node)\n return nodes_without_inbound", "def analyze_edges_and_weight(list_of_nodes):\n edges_info = []\n for node in list_of_nodes:\n n_edge_of_node = len(node.neighbors) # Counts the kys in the dictionary 'Node.neighbors'\n total_weight_of_node = sum(list(map(lambda x: node.neighbors[x], node.neighbors))) # Sums values of the dict\n node_info = (node.name, n_edge_of_node, total_weight_of_node)\n edges_info.append(node_info)\n total_n_edges = sum([tup[1] for tup in edges_info]) # Sum total number of edges\n total_weight_of_graph = sum([tup[2] for tup in edges_info]) # Sum total weight of edges\n sorted_info = sorted(edges_info, key=lambda tup: tup[1], reverse=True)\n return \"Total number of edges is {},\\nTotal weight of the graph is {}:\\nNodes sorted by no. of edges: {}.\".format(total_n_edges, total_weight_of_graph, sorted_info)", "def control_edge_count(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count or 0)", "def number_of_edges(self) -> int:\n return self.graph.number_of_edges()", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def compute_num_nodes(graph):\n return len(graph.keys()) # return the number of nodes in the graph", "def vertex_count(self):\n return len(self._outgoing)", "def init_inbound_counts(nodes, edges):\n inbound_counts = {}\n for node in nodes:\n inbound_counts[node] = 0\n for e in edges:\n inbound_counts[e[1]] = inbound_counts[e[1]] + 1\n return inbound_counts", "def branches(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts >= 3 ]", "def count_fillin(graph, nodes):\n count = 0\n for v1 in nodes:\n for v2 in nodes:\n if v1 != v2 and v2 not in graph[v1]:\n count += 1\n return count / 2", "def _num_edges(self):\n return len(self._eid2partid)", "def number_internal_links(self, node_list):\n n_links = self.internal_adjacency(node_list).sum()\n if self.directed:\n return n_links\n else:\n return n_links // 2", "def totalConnections(analyzer):\n return model.totalConnections(analyzer)", "def num_of_subgraphs(self):\n \n G = self.to_undirected_graph()\n \n count = G.num_subgraph()\n \n print('The number of disconnected components in the graph is ', count)", "def total_edges_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [sum(edge.get_count() for edge in graph.get_edges()) for graph in graphs]", "def number_active_neighbors(graph, node):\n return np.sum(\n [\n graph.nodes[neighbor_idx][\"agent\"].active\n for neighbor_idx in graph[node].keys()\n ]\n )", "def _num_nodes(self):\n return int(self._node_map[-1])", "def numShortestPaths(g, start, end):\n if g == None or start == None or end == None:\n raise InvalidInputException(\"Input is null.\")\n if not g.containsVertex(start) or not g.containsVertex(end):\n raise InvalidInputException(\"Doesn't contain source.\")\n nodes = []\n nodes.append(start)\n # Adds two decorations to the vertices\n for node in g.iterVertices():\n node.minCount = float(\"inf\")\n node.numCount = 0\n start.minCount = 0\n while not len(nodes) == 0:\n node = nodes.pop(0)\n # Base case: if node is the end node, return the numCount decoration\n if node == end:\n return node.numCount\n # Decorates neighboring 
nodes and adds them to the list\n for edge in g.incidentEdges(node):\n new_node = g.opposite(node,edge)\n if new_node.minCount == float(\"inf\"):\n new_node.minCount = node.minCount+1\n nodes.append(new_node)\n if new_node.minCount == node.minCount + 1:\n new_node.numCount = node.numCount + 1", "def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)", "def edgecount(self):\n\n raise NotImplementedError", "def num_edges(self):\n return self._ll_tree.get_num_edges()", "def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))", "def terminals(self):\n unique_nodes, unique_counts = np.unique(self.edges, return_counts=True)\n return unique_nodes[ unique_counts == 1 ]", "def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))", "def number_of_connections(self, asn):\n customer_count = 0\n provider_count = 0\n peer_count = 0\n\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n customer_count += 1\n elif edge_data[\"relationship\"] == -1 and edge_data[\"as2\"] == asn:\n provider_count += 1\n elif edge_data[\"relationship\"] == 0:\n peer_count += 1\n return customer_count, provider_count, peer_count", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def countNodes(epr):\n result = 1\n argLst = epr.args\n for arg in argLst:\n result += countNodes(arg)\n return result", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def order(self):\n return len(self._nodes)", "def get_num_edges(self):\n\n return self._graph_state.get_num_edges()", "def get_node_count(self) -> Iterable:\n return self._g.V().count().toList()[0]", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def number_of_trail_edges(self):\n return len(list(self.trail_edges()))", "def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt", "def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected", "def get_all_edges(self):\n sum = 0\n for vertex in self:\n sum += vertex.get_edges()\n return sum", "def node_size(graph):\n adj = nx.betweenness_centrality(graph)\n return np.array([x * 1e3 for x in adj.values()])", "def count_paths((min_i, min_j), (max_i, max_j)):\n\n def explore((i, j), path):\n found = 0\n for (x, y) in neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if (x, y) == (max_i, max_j):\n found += 1\n debug(\"neighbor %r of node %r on path %r is a goal node: +1\" % ((x, y), (i, j), path))\n elif (x, y) in path: \n debug(\"neighbor %r of node 
%r already on path %r; ignoring...\" % ((x, y), (i, j), path))\n continue\n else:\n debug(\"neighbor %r of node %r not already on path %r; exploring ...\" % ((x, y), (i, j), path))\n found += explore((x, y), mkpath(path, (x, y)))\n return found\n return explore((0, 0), set([(0, 0)]))", "def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten", "def count_nodes(self):\n if self.children is None:\n return 0\n\n total_count = 0\n for child in self.children:\n if child is None:\n return 0\n child_count = child.count_nodes()\n total_count = total_count + child_count\n\n return total_count+1", "def five_cycles(graph, vertex):\n count = 0\n for i in range(len(graph[vertex])):\n for j in range(i + 1, (len(graph[vertex]))):\n \tnode1 = graph[vertex][i]\n \tnode2 = graph[vertex][j]\n \tfor neighbour1 in graph[node1]:\n \t\tif neighbour1 != vertex and neighbour1 != node2:\n \t\t\tfor neighbour2 in graph[node2]:\n \t\t\t\tif neighbour2 != vertex and neighbour2 != node1 and neighbour2 in graph[neighbour1]:\n\t\t \t\t\tcount += 1\n return count", "def neighbours(self):\n return [x.node for x in self.edges]", "def size(self):\n return len(self.edges())", "def DFS(adj): # adj is the list of vertices in graph G\n\n global cc\n global visited\n\n for v in range(len(adj)): # adjacency list has length == number of nodes\n visited[v] = False\n cc = 1\n\n for v in range(len(adj)):\n if not visited[v]:\n explore(v)\n # increment connected component count after each return from explore()\n cc = cc + 1 # only increment for each unvisited node explored here\n return cc", "def edge_count(self) -> int:\n return int(self.graph_tuple_stats.edge_count or 0)", "def NodesCount(self):\n return len(self.nodes)", "def call_edge_count(self) -> int:\n return int(self.graph_tuple_stats.call_edge_count or 0)", "def node_count(self):\n return self._node_count", "def num_nodes(self):\n return len(self.nodes)", "def countComponents(self, n: int, edges: List[List[int]]) -> int:\n # BFS O_n time and space\n \n # union find ALG\n uf = UnionFind(n)\n \n for x, y in edges:\n uf.union(x, y)\n \n return len(set(uf.find(x) for x in range(n)))", "def get_num_nodes(self):\n\n return sum(self.topology)", "def size(self):\n traverse = self.head\n count = 1\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count", "def neighbors(self, max_dist=3):\n # TODO: this may have problems because the set doesn't\n # compare object id but uses user defined comparison methods\n # TODO: outgoing edges are no longer saved\n found = set()\n found.add(self)\n queue = [(self, 0)]\n while queue:\n node, d = queue.pop(0)\n if d < max_dist:\n for edge in node.outgoing:\n if edge.head not in found:\n found.add(edge.head)\n queue.append((edge.head, d+1))\n for edge in node.incoming:\n for tailnode in edge.tail:\n if tailnode not in found:\n found.add(tailnode)\n queue.append((tailnode, d+1))\n return found", "def v_size(self) -> int:\n return self.nodes_on_graph", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def streets_per_node(G):\n spn = dict(nx.get_node_attributes(G, \"street_count\"))\n if set(spn) != set(G.nodes):\n utils.log(\"Graph nodes changed since `street_count`s were calculated\", level=lg.WARN)\n return spn", "def num_edges(self):\n return (self.n * (self.L.size() - 1) - self.num_loop_vertices()) // (1 + int(not self.variant.is_bipartite()))" ]
[ "0.64417076", "0.63781655", "0.63414526", "0.63138336", "0.6295989", "0.6219865", "0.621095", "0.61789745", "0.61628515", "0.61369354", "0.6134823", "0.61268103", "0.61224973", "0.60808635", "0.60650635", "0.60560346", "0.6023287", "0.60106254", "0.60075134", "0.59867114", "0.59765416", "0.5947132", "0.5946439", "0.59292376", "0.59082735", "0.59005904", "0.5891714", "0.5891645", "0.5884957", "0.58837575", "0.5874722", "0.5844909", "0.5832295", "0.58240473", "0.58229625", "0.5821597", "0.5809133", "0.5808966", "0.58064646", "0.5795017", "0.57923186", "0.57824785", "0.5780503", "0.5776157", "0.5772392", "0.5767247", "0.57581675", "0.57556695", "0.57506514", "0.57469374", "0.5740354", "0.57046247", "0.5699673", "0.5688801", "0.56877476", "0.56870574", "0.56816626", "0.5679885", "0.56778026", "0.56747866", "0.56710005", "0.567018", "0.56641823", "0.56551534", "0.5654926", "0.56528723", "0.56376046", "0.56239426", "0.5612278", "0.55997115", "0.5599237", "0.55907744", "0.5589479", "0.5580729", "0.5571946", "0.55705696", "0.5566932", "0.55617476", "0.5556572", "0.55552065", "0.55545837", "0.55502635", "0.5547403", "0.5539438", "0.55394024", "0.55365825", "0.55337435", "0.55334824", "0.5531274", "0.55207217", "0.55095315", "0.550481", "0.550103", "0.5476125", "0.5475643", "0.5471188", "0.5471069", "0.5471069", "0.54689854", "0.5464371" ]
0.5841281
32
top down topo sort. nodes that don't reach the target node are thrown away
def topo_sort(self): # TODO: detect cycles self.find_reachable_nodes() # save list of nodes in topo order self.nodes = [] # assign each node an id field incrementally cur_id = 0 # count visited outgoing edges for each node unvisited = {} for nid, node in list(self.found.items()): unvisited[nid] = node.nout queue = [self.root] #print >>sys.stderr, '+++' while queue: # take off nodes whose all outgoing edges are visited from # queue head node = queue.pop(0) self.nodes.append(node) node.hg = self node.id = cur_id cur_id += 1 for edge in node.incoming: edge.hg = self for tailnode in edge.tail: #print >>sys.stderr, tailnode unvisited[id(tailnode)] -= 1 if unvisited[id(tailnode)] == 0: queue.append(tailnode) self.sanity_check() self.tasks_done.add('topo_sort')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset_topological_order(self):\n self._topological_order = self._input_nodes[:]\n self.sorted = False", "def topologicalSort(self):\r\n visited = [False]*self.vertices \r\n stack =[]\r\n \"\"\"\r\n using stack, problems with using code given by\r\n professor (using queues) so I'm using a new approach\r\n \"\"\"\r\n for i in range(self.vertices):\r\n \"\"\"\r\n traversing thru number of vertices, checking\r\n if false, and if is, goes to helper method\r\n \"\"\"\r\n if visited[i] == False: \r\n self.topologicalSortUtil(i,visited,stack) \r\n \r\n\r\n print(stack)", "def _topological_sort(self):\n self._reset_topological_order()\n\n def is_connected(src, dst):\n \"\"\"Judge two node whether are connected.\"\"\"\n for precursor in dst.precursor_nodes:\n if src == precursor.split(\":\")[0]:\n return 1\n return 0\n\n idx = 0\n while idx < len(self._topological_order):\n cur_node_name = self._topological_order[idx]\n cur_node = self.get_node(cur_node_name)\n # `scsr` is abbreviation for `successor`.\n for scsr_name in cur_node.successor_nodes:\n scsr_node = self.get_node(scsr_name)\n scsr_node.cur_in_degree -= is_connected(cur_node_name,\n scsr_node)\n if scsr_node.cur_in_degree == 0:\n self._topological_order.append(scsr_name)\n idx += 1\n self.sorted = True", "def sort(self):\n srt = self.sources()\n stack = list(srt) # makes a copy\n while stack:\n node = stack.pop(0)\n if (not node.isSink()):\n # if a child is not in srt, and all of its parents are in srt,\n # then add it. Must have all parents to get true topo sort.\n newChildren = filter(lambda x: len(set(x.parents()) - set(srt))==0,\n [child for child in node.children() if child not in srt])\n stack.extend(newChildren)\n srt.extend(newChildren)\n return srt", "def topological_sort(self):\n\t\t#detect leaves\n\t\tnumChildren = dict((n.name,0) for n in self.variables.values())\n\t\tfor n in self.variables.itervalues():\n\t\t\tfor p in n.parents:\n\t\t\t numChildren[p]+=1\n\t\t#do a BFS from leaves to get the reverse topological sort\n\t\ttopo = []\n\t\tqueue = [n for (n,c) in numChildren.iteritems() if c==0]\n\t\tif len(queue)==0:\n\t\t\traise ValueError(\"Bayes net is not acyclic?\")\n\t\twhile len(queue)>0:\n\t\t\tn = self.variables[queue.pop(0)]\n\t\t\ttopo.append(n)\n\t\t\tfor p in n.parents:\n assert numChildren[p]>0\n numChildren[p] -= 1\n if numChildren[p]==0:\n queue.append(p)\n\t\t#now reverse it to get the top down ordering\n assert len(topo)==len(self.variables)\n\t\treturn reversed(topo)", "def test_0(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r2)\r\n o2 = MyOp.make_node(o.outputs[0], r5)\r\n\r\n all = general_toposort(o2.outputs, prenode)\r\n assert all == [r5, r2, r1, o, o.outputs[0], o2, o2.outputs[0]]\r\n\r\n all = io_toposort([r5], o2.outputs)\r\n assert all == [o, o2]", "def topological_sort(graph, rootKey = None):\n\n\t# Reset's the attribute values of all Nodes in graph to their initialization values.\n\t# Importantly, resets Node.searchStatus to \"undiscovered\" and Node.parent to None.\n\tgraph.reset()\n\n\ttopologicalKeyList = []\n\n\t# time is declared inside a function and so must be made global.\n\tglobal time; time = 0\n\n\t# If a starting root is specified, begin there.\n\tif rootKey is not None:\n\t\ttopological_sort_visit(graph, rootKey, topologicalKeyList)\n\n\t# Visit each undiscovered Node.\n\n\t# The keys are ordered here to enforce an easily predictable traversal.\n\t# This is not necessary and reduces efficiency, but makes testing very 
straightforward. \n\t# For the purposes of this program this loss in efficiency is acceptable.\n\torderedKeys = list(graph.adjacencyMap.keys()); orderedKeys.sort()\n\tfor key in orderedKeys:\n\t\tif graph.vertexMap[key].searchStatus == \"undiscovered\":\n\t\t\ttopological_sort_visit(graph, key, topologicalKeyList)\n\n\t# Explored and created a forest within graph.\n\treturn topologicalKeyList", "def topological_sort(g) -> list:\n dfs(g)\n res = [v for v in g]\n quick_sort(res, key=lambda v: v.finish_time)\n res.reverse()\n return res", "def _toposort(edges):\r\n incoming_edges = reverse_dict(edges)\r\n incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())\r\n S = set((v for v in edges if v not in incoming_edges))\r\n L = []\r\n\r\n while S:\r\n n = S.pop()\r\n L.append(n)\r\n for m in edges.get(n, ()):\r\n assert n in incoming_edges[m]\r\n incoming_edges[m].remove(n)\r\n if not incoming_edges[m]:\r\n S.add(m)\r\n if any(incoming_edges.get(v, None) for v in edges):\r\n raise ValueError(\"Input has cycles\")\r\n return L", "def _topological_sort(self):\n\n visited = defaultdict(bool)\n stack = []\n\n for pod in self.pods:\n if not visited[pod]:\n self._topological_sort_pod(pod, visited, stack)\n\n return stack[::-1]", "def sorting(self, presorted=None):\n self._sorted_nodes = []\n if presorted:\n notsorted_nodes = copy(presorted)\n else:\n notsorted_nodes = copy(self.nodes)\n predecessors = {key: copy(val) for (key, val) in self.predecessors.items()}\n\n # nodes that depends only on the self._nodes_wip should go first\n # soe remove them from the connections\n for nd_out in self._node_wip:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)\n\n while notsorted_nodes:\n sorted_part, notsorted_nodes = self._sorting(notsorted_nodes, predecessors)\n self._sorted_nodes += sorted_part\n for nd_out in sorted_part:\n for nd_in in self.successors[nd_out.name]:\n predecessors[nd_in.name].remove(nd_out)", "def test_1(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r1)\r\n o2 = MyOp.make_node(o.outputs[0], r5)\r\n all = general_toposort(o2.outputs, prenode)\r\n assert all == [r5, r1, o, o.outputs[0], o2, o2.outputs[0]]", "def binarize_top_down(self, ignored_elements=frozenset()):\n\n fifo = collections.deque((self.top,))\n is_seen = {self.top}\n\n while fifo:\n # print(\"\\n\", fifo)\n vertex = fifo.pop()\n # print(\"element \", vertex)\n if len(self.under(vertex)) > 2 and vertex not in ignored_elements:\n self.binarization_element_under(vertex)\n visit_list = self.under(vertex)\n for neighbor in visit_list:\n if neighbor not in is_seen:\n is_seen.add(neighbor)\n fifo.appendleft(neighbor)\n\n return self", "def fast_targeted_order(ugraph):\n\n graph = copy_graph(ugraph)\n# print graph\n size = len(ugraph) #- 1\n degree_sets = [set() for _ in range(size)]\n order = []\n\n for node, edges in ugraph.iteritems():\n degree = len(edges)\n degree_sets[degree].add(node)\n# print degree_sets\n\n for k in range(size - 1, -1, -1):\n while degree_sets[k]:\n node = degree_sets[k].pop()\n neighbors = graph[node]\n for neighbor in neighbors:\n degree = len(graph[neighbor])\n degree_sets[degree].remove(neighbor)\n degree_sets[degree - 1].add(neighbor)\n order.append(node)\n delete_node(graph, node)\n return order", "def topological_sort(self):\n \n visited = set()\n sorted_node = [] \n\n # sort all the node in the graph\n for i in self.node_set: \n if i not in visited: \n visited = self.topological_sort_helper(i, 
visited, sorted_node) \n \n visited.clear()\n return sorted_node", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def toposort(nodes, get_next_nodes):\n traversing = set()\n traversed = set()\n result = collections.deque()\n\n def traverse(node):\n if node in traversing:\n raise CycleError(node)\n if node in traversed:\n return # not a cycle but we already saw this\n traversing.add(node)\n for next in get_next_nodes(node):\n traverse(next)\n traversed.add(node)\n traversing.remove(node)\n result.appendleft(node)\n\n for node in nodes:\n traverse(node)\n\n return list(result)", "def fast_target_order(ugraph):\n result_order = []\n num_nodes = len(ugraph.keys())\n degrees = [set([]) for _ in range(num_nodes)]\n for node in ugraph.keys():\n node_degree = len(ugraph[node])\n degrees[node_degree].add(node)\n for degree in range(num_nodes - 1, -1, -1):\n while degrees[degree] != set([]):\n elem = degrees[degree].pop()\n for neighbor in ugraph[elem]:\n n_degree = len(ugraph[neighbor])\n degrees[n_degree].remove(neighbor)\n degrees[n_degree - 1].add(neighbor)\n result_order.append(elem)\n delete_node(ugraph, elem)\n return result_order", "def test_sort(self):\n a, b, c, d = Node('a'), Node('b'), Node('c'), Node('d')\n a | b | c\n a * 'foo' | 'bar' * c\n d | 'baz' * b\n nodes = topo_sort([a, d])\n self.assertEqual(set(nodes[:2]), set([a, d]))\n self.assertEqual(nodes[2:], [b, c])", "def topSort(self, x=None, seen=None, order=deque([]), cycle=False):\n\n if x is None:\n for x in self.getVertices(): # choose an arbitrary vertex\n break\n\n if seen is None:\n seen = {vertex: False for vertex in self.getVertices()}\n\n seen[x] = True\n\n for y, weight in self.outboundEdges(x):\n if seen[y]:\n cycle = True\n return False\n\n order = self.topSort(y, seen, order, cycle)\n\n if order == False:\n cycle = True\n return False\n\n\n order.appendleft(x)\n return order\n\n # print(\"%i \" % x, end='')", "def sort_nodes(self):\n non_terminal_nodes = []\n for node in self.nodes:\n if not node.entries:\n assert self.start is None, (\n 'there are more than one node with no incoming arcs')\n self.start = node\n elif not node.exits:\n assert self.end is None, (\n 'there are more than one node with no outgoing arcs')\n self.end = node\n else:\n non_terminal_nodes.append(node)\n assert self.start is not None and self.end is not None, (\n 'no start or end node')\n self.nodes = ([self.start]\n + sorted(non_terminal_nodes,\n key=lambda x: (x.entry, x.sym))\n + [self.end])\n for n in self.nodes:\n n.exits.sort(key=lambda x: (x.dest.entry, x.dest.sym))", "def test_topological_sort(self) -> None:\n\n # tuple convention: (outgoing, incoming)\n graph = {\n \"1\": ([\"4\"], []),\n \"2\": ([\"4\"], []),\n \"3\": ([\"5\", \"6\"], []),\n \"4\": ([\"7\", \"5\"], [\"1\", \"2\"]),\n \"5\": ([\"8\"], [\"4\", \"3\"]),\n \"6\": ([], [\"3\"]),\n \"7\": ([\"8\"], [\"4\"]),\n \"8\": ([], [\"7\", \"5\"])\n } # type: Dict[str, Tuple[List[str], List[str]]]\n\n self.assertEqual(topological_sort(graph, [\"1\", \"2\", \"3\"]), [\"1\", \"2\", \"3\", \"4\", \"6\", \"7\", \"5\", \"8\"])", "def test_topological_sort(self) -> None:\n\n # tuple convention: (outgoing, incoming)\n graph = {\n \"1\": ([\"4\"], []),\n \"2\": ([\"4\"], []),\n \"3\": ([\"5\", \"6\"], []),\n \"4\": ([\"7\", \"5\"], [\"1\", \"2\"]),\n \"5\": ([\"8\"], [\"4\", \"3\"]),\n \"6\": ([], [\"3\"]),\n \"7\": ([\"8\"], [\"4\"]),\n \"8\": ([], [\"7\", \"5\"])\n } # type: Dict[str, Tuple[List[str], 
List[str]]]\n\n self.assertEqual(topological_sort(graph, [\"1\", \"2\", \"3\"]), [\"1\", \"2\", \"3\", \"4\", \"6\", \"7\", \"5\", \"8\"])", "def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n counter = 0\n order = [] \n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n counter += 1\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n \n \n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n counter += 1\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order # uncomment to use graph_resilience_targeted_order", "def fast_targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n degree_sets = {} # degree_set[k] is a set of all nodes whose degree is k\n for k in range(len(new_graph)):\n degree_sets[k] = set([])\n \n for i in new_graph:\n d = len(new_graph[i])\n degree_sets[d].add(i)\n\n attack_order =[]\n n = len(new_graph)\n for k in reversed(range(n-1)):\n while len(degree_sets[k]):\n u = random.choice(tuple(degree_sets[k])) # u is an arbitrary element in degree-set[k]\n degree_sets[k].remove(u)\n for v in new_graph[u]: # v is the neighbor of u\n d = len(new_graph[v])\n degree_sets[d].remove(v)\n degree_sets[d-1].add(v)\n\n attack_order.append(u)\n delete_node(new_graph, u)\n \n return attack_order", "def dependency_order(self):\n seen = set()\n\n def _prune_visited(node):\n if node in seen:\n return True\n seen.add(node)\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_prune_visited):\n yield node.data", "def toposort(adj):\n # Memoize for visited vertex\n used = [0] * len(adj)\n order = []\n # write your code here\n # Traverse through each vertex\n for i in range(len(adj)):\n if not used[i]:\n # If not visited, run dfs\n dfs(adj, used, order, i)\n\n # Reverse the order list to show in descending order\n order.reverse()\n return order", "def sort(self):\n while self.nodes != []:\n iterated = False\n for node in self.leaf_nodes():\n iterated = True\n self.prune_node(node)\n yield node\n if not iterated:\n raise CyclicGraphError(\"Sorting has found a cyclic graph.\")", "def topological_sort(self, verbose=True):\n visited = set()\n stack = []\n rec_stack = set()\n for vertex in self.vertices():\n if vertex not in visited:\n if self._topological_sort(vertex, visited, stack, rec_stack):\n print('ERROR: Graph is cyclic! 
Cannot perform Topological Sort.')\n return None\n if verbose:\n print('TopologicalSort(Graph):', stack)\n return stack", "def sort(self):\r\n return self.sort_targets([self])", "def find_topo_sort(node_list):\r\n visited = set()\r\n topo_order = []\r\n #print(node_list)\r\n for node in node_list:\r\n topo_sort_dfs(node, visited, topo_order)\r\n return topo_order", "def topological_sort(self):\n in_degree = {}\n for node in self.graph:\n in_degree[node] = 0\n\n for from_node in self.graph:\n for to_node in self.graph[from_node]:\n in_degree[to_node] += 1\n\n queue = deque()\n for node in in_degree:\n if in_degree[node] == 0:\n queue.appendleft(node)\n\n sorted_nodes = []\n while queue:\n independent_node = queue.pop()\n sorted_nodes.append(independent_node)\n for next_node in self.graph[independent_node]:\n in_degree[next_node] -= 1\n if in_degree[next_node] == 0:\n queue.appendleft(next_node)\n\n if len(sorted_nodes) == len(self.graph):\n return sorted_nodes\n else:\n raise ValueError('graph is not acyclic')", "def topological_sort(graph_unsorted):\n graph_sorted = []\n graph_unsorted = dict(graph_unsorted)\n while graph_unsorted:\n acyclic = False\n for node, edges in list(graph_unsorted.items()):\n if edges is None:\n edges = []\n for edge in edges:\n if edge in graph_unsorted:\n break\n else:\n acyclic = True\n del graph_unsorted[node]\n graph_sorted.append((node, edges))\n if not acyclic:\n raise RuntimeError('A cyclic dependency occurred')\n return graph_sorted", "def remove_with_sort(to_remove):\n slow = to_remove.head\n runner = to_remove.head\n\n while slow:\n while runner:\n if runner.next_node:\n if slow.value == runner.next_node.value:\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n slow = slow.next_node\n try:\n runner = slow.next_node\n except:\n pass", "def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n order = []\n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n \n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n new_graph[neighbor].remove(max_degree_node)\n \n order.append(max_degree_node)\n return order", "def topological_sort(X, root):\r\n neighbors = X.neighbors\r\n\r\n visited = defaultdict(lambda: False)\r\n\r\n stack = []\r\n parents = {}\r\n\r\n build_topological(root, None, neighbors, visited, stack, parents)\r\n return stack, parents", "def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n\n order = []\n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n\n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order", "def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n\n order = []\n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n\n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order", "def targeted_order(ugraph):\r\n # copy the graph\r\n new_graph = 
copy_graph(ugraph)\r\n \r\n order = [] \r\n while len(new_graph) > 0:\r\n max_degree = -1\r\n for node in new_graph:\r\n if len(new_graph[node]) > max_degree:\r\n max_degree = len(new_graph[node])\r\n max_degree_node = node\r\n \r\n neighbors = new_graph[max_degree_node]\r\n new_graph.pop(max_degree_node)\r\n for neighbor in neighbors:\r\n new_graph[neighbor].remove(max_degree_node)\r\n\r\n order.append(max_degree_node)\r\n return order", "def _sort_nodes_by_height(self):\n self.node_high_to_low = np.argsort(self.height)[::-1]\n\n # Also to sort neighbour node array by height\n\n neighbour_array_lo_hi = self.neighbour_array.copy() # easiest way to get size / structure right\n\n for node in range(0,self.tri.npoints):\n heights = self.height[self.neighbour_array[node]]\n neighbour_array_lo_hi[node] = self.neighbour_array[node][np.argsort(heights)]\n \n self.neighbour_array_lo_hi = neighbour_array_lo_hi", "def topological_sort_visit(graph, thisKey, topologicalKeyList):\n\n\t# Discover the Node at thisKey.\n\tglobal time\n\ttime += 1\n\tthisNode = graph.vertexMap[thisKey]\n\tthisNode.searchStatus = \"exploring\"\n\n\t# Explore each undiscovered adjacent Node and set their parent attributes.\n\n\t# The keys are ordered here to enforce an easily predictable traversal.\n\t# This is not necessary and reduces efficiency, but makes testing very straightforward. \n\t# For the purposes of this program this loss in efficiency is acceptable.\n\tsortedAdjacentKeys = list(graph.adjacentKeys(thisKey)); sortedAdjacentKeys.sort()\n\tfor adjacentKey in sortedAdjacentKeys:\n\t\tadjacentNode = graph.vertexMap[adjacentKey]\n\t\tif adjacentNode.searchStatus == \"undiscovered\":\n\t\t\tadjacentNode.parent = thisNode\n\t\t\ttopological_sort_visit(graph, adjacentKey, topologicalKeyList)\n\n\t# All adjacent Nodes have been explored.\n\ttime += 1\n\tthisNode.finishTime = time\n\tthisNode.searchStatus = \"finished\"\n\ttopologicalKeyList.insert(0, thisKey)", "def test_2(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r1)\r\n r2b = o.outputs[0]\r\n o2 = MyOp.make_node(r2b, r2b)\r\n all = io_toposort([r2b], o2.outputs)\r\n assert all == [o2]\r\n\r\n o2 = MyOp.make_node(r2b, r5)\r\n all = io_toposort([r2b], o2.outputs)\r\n assert all == [o2]", "def findTopologicalOrder(self):\n # This implementation temporarily messes with reverse stars, must fix at end\n numOrderedNodes = 0\n while numOrderedNodes < self.numNodes:\n nextNode = self.findLeastEnteringLinks()\n if len(self.node[nextNode].reverseStar) > 0:\n print(\"Error: Network given to findTopologicalOrder contains a cycle.\")\n raise BadNetworkOperationException\n numOrderedNodes += 1\n self.node[nextNode].order = numOrderedNodes\n self.node[nextNode].reverseStar = [0] * self.numLinks\n for ij in self.node[nextNode].forwardStar:\n self.node[self.link[ij].head].reverseStar.remove(ij)\n \n # Repopulate reverse star list\n for i in self.node:\n self.node[i].reverseStar = list()\n for ij in self.link:\n self.node[self.link[ij].head].reverseStar.append(ij)", "def sorted_nodes(self):\r\n def is_source(node, connections):\r\n for connection in connections:\r\n if node == connection[1]:\r\n return False\r\n return True\r\n\r\n def source_connections(node, connections):\r\n conns = set()\r\n for connection in connections:\r\n if node == connection[0]:\r\n conns.add(connection)\r\n return conns\r\n\r\n nodes = set(self.nodes.values())\r\n connections = self.connections.copy()\r\n sorted_nodes = []\r\n\r\n # Find source nodes:\r\n 
source_nodes = set([n for n in nodes if is_source(n, connections)])\r\n\r\n # while S is non-empty do\r\n while source_nodes:\r\n # remove a node n from S\r\n node = source_nodes.pop()\r\n # insert n into L\r\n sorted_nodes.append(node)\r\n\r\n # for each node m with an edge e from n to m do\r\n s_connections = source_connections(node, connections)\r\n for connection in s_connections:\r\n # remove edge e from the graph\r\n m = connection[1]\r\n connections.remove(connection)\r\n # if m has no other incoming edges then\r\n # insert m into S\r\n if is_source(m, connections):\r\n source_nodes.add(m)\r\n\r\n # if graph has edges then\r\n # output error message (graph has at least one cycle)\r\n # else\r\n # output message (proposed topologically sorted order: L)\r\n\r\n if connections:\r\n raise Exception(\"Steram has at least one cycle (%d connections left of %d)\" % (len(connections), len(self.connections)))\r\n\r\n return sorted_nodes", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp", "def find_up(l_node, f_node):\r\n if isinstance(l_node, gof.Apply):\r\n l_outs = l_node.outputs\r\n else:\r\n l_outs = l_node\r\n l_ins = gof.graph.inputs(l_outs)\r\n nodes = gof.graph.io_toposort(l_ins, l_outs)\r\n return f_node in nodes", "def topological_sort(adjmat, start=None):\n # Convert to adjmat\n if isinstance(adjmat, dict):\n adjmat = adjmat.get('adjmat', None)\n elif np.all(np.isin(adjmat.columns, ['source','target','weight'])):\n adjmat = vec2adjmat(adjmat['source'], adjmat['target'])\n\n # Convert to graph\n graph = adjmat2dict(adjmat)\n # Do the topological sort\n seen = set()\n stack = [] # path variable is gone, stack and order are new\n order = [] # order will be in reverse order at first\n if start is None:\n q = list(graph)\n else:\n q = [start]\n while q:\n v = q.pop()\n if v not in seen:\n seen.add(v) # no need to append to path any more\n q.extend(graph[v])\n\n while stack and v not in graph[stack[-1]]: # new stuff here!\n order.append(stack.pop())\n stack.append(v)\n\n return stack + order[::-1]", "def connect_backwards(self):\n\n for n in self.nodes:\n n.receives_from = []\n\n for n1 in self.nodes:\n for n2 in n1.sends_to:\n n2.receives_from.append(n1)", "def sorted_traversal(self):\n\t\tself.__sorted_traversal(self)", "def pruning_order(self, max_to_prune=None):\n\n def _get_terminal_nodes(children):\n \"\"\"Lists the nodes that only have leaves as children\"\"\"\n leaves = np.where(children[:,0]==_tree.TREE_LEAF)[0]\n child_is_leaf = np.in1d(children, leaves).reshape(children.shape)\n return np.where(np.all(child_is_leaf, axis=1))[0]\n\n def _next_to_prune(tree, children=None):\n \"\"\"Weakest link pruning for the subtree defined by children\"\"\"\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]\n\n if max_to_prune is None:\n max_to_prune = self.node_count - sum(self.children_left == _tree.TREE_UNDEFINED)\n\n children = np.array([self.children_left.copy(), 
self.children_right.copy()]).T\n nodes = list()\n\n while True:\n node = _next_to_prune(self, children)\n nodes.append(node)\n\n if (len(nodes) == max_to_prune) or (node == 0):\n return np.array(nodes)\n\n #Remove the subtree from the children array\n children[children[node], :] = _tree.TREE_UNDEFINED\n children[node, :] = _tree.TREE_LEAF", "def build_order(self):\n seen = set()\n\n def _already_built(node):\n # visit only once\n if node in seen:\n return True\n seen.add(node)\n\n # prune if the result is already computed\n if node.output_ready:\n return True\n\n return False\n\n for target in self.targets:\n if target in seen:\n continue\n for node in target.postorder(prune_fn=_already_built):\n yield node", "def topological_sort(graph_unsorted):\n\n # This is the list we'll return, that stores each node/edges pair\n # in topological order.\n graph_sorted = []\n\n # Convert the unsorted graph into a hash table. This gives us\n # constant-time lookup for checking if edges are unresolved, and\n # for removing nodes from the unsorted graph.\n graph_unsorted = dict(graph_unsorted)\n\n # Run until the unsorted graph is empty.\n while graph_unsorted:\n # Go through each of the node/edges pairs in the unsorted\n # graph. If a set of edges does not contain any nodes that\n # haven't been resolved, that is, that are still in the\n # unsorted graph, remove the pair from the unsorted graph,\n # and append it to the sorted graph. Note here that by using\n # using the items() method for iterating, a copy of the\n # unsorted graph is used, allowing us to modify the unsorted\n # graph as we move through it. We also keep a flag for\n # checking that that graph is acyclic, which is true if any\n # nodes are resolved during each pass through the graph. If\n # not, we need to bail out as the graph therefore can't be\n # sorted.\n acyclic = False\n for node, edges in list(graph_unsorted.items()):\n if edges is None:\n edges = []\n for edge in edges:\n if edge in graph_unsorted:\n break\n else:\n acyclic = True\n del graph_unsorted[node]\n graph_sorted.append((node, edges))\n\n if not acyclic:\n # Uh oh, we've passed through all the unsorted nodes and\n # weren't able to resolve any of them, which means there\n # are nodes with cyclic edges that will never be resolved,\n # so we bail out with an error.\n raise RuntimeError(\"A cyclic dependency occurred\")\n\n return graph_sorted", "def bottom_up(root, node2distances, node2num_paths, node2parents):\n ###TODO\n pass", "def _topological_sort_dfs(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def fast_targeted_order(ugraph):\n degree_sets = {}\n num_nodes = len(ugraph)\n for node in range(0, num_nodes):\n degree_sets[node] = set([])\n\n for node_degree in range(0, num_nodes):\n degree = len(ugraph[node_degree])\n degree_sets[degree].add(node_degree)\n\n desc_node_degrees = []\n index = 0\n\n for degree in range(num_nodes - 1, -1, -1):\n while len(degree_sets[degree]) > 0:\n node_to_add = degree_sets[degree].pop()\n for neighbor in ugraph[node_to_add]:\n neighbor_degree = len(ugraph[neighbor])\n degree_sets[neighbor_degree].remove(neighbor)\n degree_sets[neighbor_degree - 1].add(neighbor)\n desc_node_degrees.insert(index, node_to_add)\n index += 1\n a.delete_node(ugraph, node_to_add)\n\n return desc_node_degrees", "def toposorted_actions(self) -> Iterable[Action]:\n # Here we execute two \"nanopasses\" (a term borrowed from compiler implementation)\n #\n # 1. 
Traverse a values-and-actions graph, reducing it to a dependency graph containing actions\n #\n # 2. Perform a toposort over actions (using Kahn's algorithm https://en.wikipedia.org/wiki/Topological_sorting)\n #\n # TODO: switch to graphlib from standard library\n #\n # TODO: Consider using Tarjan's strongly connected components algorithm\n # Rationale: Tarjan's SCC would find loops and produce a helpful diagnostic\n\n # 1. Dependency graph representation optimized for toposort\n o: dict[Action, set[Action]] = {} # for actions: action -> set of outgoing dependency edges\n i: dict[Action, set[Action]] = {} # for actions: action -> set of incoming dependency edges\n\n # set of nodes without incoming edges\n s: Set[Action] = set()\n\n # 1. Transform execution plan into dependency graph\n for action in self.actions:\n # if action does not depend on any other action, add it to set s\n if all(inp.producer() is None for inp in action.inputs()):\n s.add(action)\n # add outgoing edges to graph, if any\n for output in action.outputs():\n for depending_action in output.consumers():\n # add an edge action -> depending_action to the graph\n if action not in o:\n o[action] = set()\n if depending_action not in i:\n i[depending_action] = set()\n o[action].add(depending_action)\n i[depending_action].add(action)\n\n # 2. Now run Kahn's algorithm (could be separated from previous to improve abstraction)\n # resulting list\n l: list[Action] = []\n\n while len(s) > 0:\n n = s.pop()\n l.append(n)\n if n in o:\n o_n = o[n]\n del o[n]\n else:\n o_n = set()\n while len(o_n) > 0:\n # remove edge from the graph\n m = o_n.pop()\n i[m].remove(n)\n if len(i[m]) == 0:\n del i[m]\n s.add(m)\n\n if len(o) != 0 or len(i) != 0:\n for (node, edges) in o.items():\n print(\"Source: \" + str(node))\n for e in edges:\n print(\" Edge: \" + str(e))\n raise Exception(\"Dependency graph has at least one cycle\")\n else:\n return l", "def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]", "def move_up_node(g,i,k): # i: node address of null element, k: coefficient\n d = i%k*k #d: address of dependent node coindexed with i\n # co-index stored in 'ctag'\n g.nodes[d]['ctag'] = g.nodes[i]['ctag']\n h = g.nodes[d]['head'] # assumption: 'head' exists\n h_new = chose_head(g,h,d)\n g.nodes[d]['head'] = h_new\n rel = g.nodes[d]['rel']\n g.nodes[d]['rel'] = 'NP2P'\n g.add_arc(h_new,d)\n g.nodes[h]['deps'][rel].remove(d)", "def topologicalSort(self, nodes: List[int], prerequisites: List[List[int]]) -> List[int]:\n # graph\n # prep: x ->requires set(y1, y2, ..), before take x need y1, y2, .., takes count only\n # post: y ->followup set(x1, x2, ..), \n # after take y, it is possible (only possible may not, as x_i might need others) x1, x2, ..\n prep = defaultdict(lambda: 0)\n post = defaultdict(set)\n for x, y in prerequisites:\n prep[x] += 1\n post[y].add(x)\n # schedule\n # start with all nodes requires no prerequisites\n schedule, boundary = [], [x for x in nodes if x not in prep]\n while boundary:\n y = boundary.pop()\n schedule.append(y)\n if y in post:\n xs = post.pop(y)\n for x in xs:\n prep[x] -= 1\n # all prerequisites of x are cleared \n if prep[x] == 0:\n prep.pop(x)\n boundary.append(x)\n # some nodes are impossible to complete?\n 
if prep:\n return []\n return schedule", "def _topological_sort(self, curr_vertex, visited, stack, rec_stack):\n visited.add(curr_vertex)\n rec_stack.add(curr_vertex)\n for vertex in self.neighbors(curr_vertex):\n if vertex not in visited:\n if self._topological_sort(vertex, visited, stack, rec_stack):\n return True\n elif vertex in rec_stack:\n return True\n stack.insert(0, curr_vertex)\n rec_stack.remove(curr_vertex)", "def sift_up(heap, start, end):\n # Swap last node with parents until no longer greater.\n i = end - 1\n heaped = False\n while i > start and not heaped:\n parent = (i - 1) // 2\n if compare(heap[i], heap[parent]) > 0:\n heap[i], heap[parent] = heap[parent], heap[i]\n i = parent\n else:\n heaped = True", "def __update_priority_queue(self):\r\n # Loop over every node\r\n for node in self.__graph.nodes:\r\n # Only if its fully visited do we want to remove it\r\n if node.is_fully_visited:\r\n # This searched for the wanted visited node\r\n for visited_node in self.__priority_queue:\r\n if visited_node.node_name == node.name:\r\n # Once found move it to the fully visited list\r\n self.__fully_visited.append(visited_node)\r\n # Then remove it form the priority queue\r\n self.__priority_queue.remove(visited_node)", "def topological_nodes_generator(graph, reverse=...):\n ...", "def TopologicallySorted(graph, get_edges):\n get_edges = memoize(get_edges)\n visited = set()\n visiting = set()\n ordered_nodes = []\n def Visit(n):\n if n in visiting:\n raise CycleError(visiting)\n if n in visited:\n return\n visited.add(n)\n visiting.add(n)\n for neighbor in get_edges(n):\n Visit(neighbor)\n visiting.remove(n)\n ordered_nodes.insert(0, n)\n for node in sorted(graph):\n Visit(node)\n return ordered_nodes", "def walk_sort(edges):\n g = nx.Graph()\n g.add_edges_from(edges)\n connected = set()\n degree = nx.degree(g)\n ordering = []\n while degree:\n next = max_degree_node(g, degree, connected)\n if next is not None:\n ordering.append(next)\n else:\n break\n return ordering", "def sort_targets(cls, internal_targets):\r\n roots = OrderedSet()\r\n inverted_deps = collections.defaultdict(OrderedSet) # target -> dependent targets\r\n visited = set()\r\n path = OrderedSet()\r\n\r\n def invert(target):\r\n if target in path:\r\n path_list = list(path)\r\n cycle_head = path_list.index(target)\r\n cycle = path_list[cycle_head:] + [target]\r\n raise cls.CycleException(cycle)\r\n path.add(target)\r\n if target not in visited:\r\n visited.add(target)\r\n if getattr(target, 'internal_dependencies', None):\r\n for internal_dependency in target.internal_dependencies:\r\n if hasattr(internal_dependency, 'internal_dependencies'):\r\n inverted_deps[internal_dependency].add(target)\r\n invert(internal_dependency)\r\n else:\r\n roots.add(target)\r\n path.remove(target)\r\n\r\n for internal_target in internal_targets:\r\n invert(internal_target)\r\n\r\n ordered = []\r\n visited.clear()\r\n\r\n def topological_sort(target):\r\n if target not in visited:\r\n visited.add(target)\r\n if target in inverted_deps:\r\n for dep in inverted_deps[target]:\r\n topological_sort(dep)\r\n ordered.append(target)\r\n\r\n for root in roots:\r\n topological_sort(root)\r\n\r\n return ordered", "def find_topo_sort(node_list):\n visited = set()\n topo_order = []\n for node in node_list:\n topo_sort_dfs(node, visited, topo_order)\n return topo_order", "def find_topo_sort(node_list):\n visited = set()\n topo_order = []\n for node in node_list:\n topo_sort_dfs(node, visited, topo_order)\n return topo_order", "def _tarjan (self, v):\n 
if self.__tarjanIndex.get(v) is not None:\n # \"Root\" was already reached.\n return\n self.__tarjanIndex[v] = self.__tarjanLowLink[v] = self.__index\n self.__index += 1\n self.__stack.append(v)\n source = v\n for target in self.__edgeMap.get(source, []):\n if self.__tarjanIndex[target] is None:\n self._tarjan(target)\n self.__tarjanLowLink[v] = min(self.__tarjanLowLink[v], self.__tarjanLowLink[target])\n elif target in self.__stack:\n self.__tarjanLowLink[v] = min(self.__tarjanLowLink[v], self.__tarjanLowLink[target])\n else:\n pass\n\n if self.__tarjanLowLink[v] == self.__tarjanIndex[v]:\n scc = []\n while True:\n scc.append(self.__stack.pop())\n if v == scc[-1]:\n break\n self.__sccOrder.append(scc)\n if 1 < len(scc):\n self.__scc.append(scc)\n [ self.__sccMap.setdefault(_v, scc) for _v in scc ]", "def dft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n s = Stack() # create an empty Stack \n s.push(starting_vertex) # push the starting_vertex to the top of the stack\n\n while s.size() > 0: # loop if the size is greater than 0\n v = s.pop() # pop off first element and store \n\n if v not in visited: # if v has not been visited yet\n visited.add(v) # add to the set \n print(v)\n for neighbor in self.vertices[v]: # loop through neighbors \n s.push(neighbor) # add each neighbor to the bottom of the stack", "def topological_sort(items, partial_order):\n\n def add_node(graph, node):\n \"\"\"Add a node to the graph if not already exists.\"\"\"\n if node not in graph:\n graph[node] = [0] # 0 = number of arcs coming into this node.\n\n def add_arc(graph, fromnode, tonode):\n \"\"\"Add an arc to a graph. Can create multiple arcs.\n The end nodes must already exist.\"\"\"\n graph[fromnode].append(tonode)\n # Update the count of incoming arcs in tonode.\n graph[tonode][0] += 1\n\n # step 1 - create a directed graph with an arc a->b for each input\n # pair (a,b).\n # The graph is represented by a dictionary. The dictionary contains\n # a pair item:list for each node in the graph. /item/ is the value\n # of the node. /list/'s 1st item is the count of incoming arcs, and\n # the rest are the destinations of the outgoing arcs. For example:\n # {'a':[0,'b','c'], 'b':[1], 'c':[1]}\n # represents the graph: c <-- a --> b\n # The graph may contain loops and multiple arcs.\n # Note that our representation does not contain reference loops to\n # cause GC problems even when the represented graph contains loops,\n # because we keep the node names rather than references to the nodes.\n graph = {}\n for v in items:\n add_node(graph, v)\n for a, b in partial_order:\n add_arc(graph, a, b)\n\n # Step 2 - find all roots (nodes with zero incoming arcs).\n roots = [node for (node, nodeinfo) in graph.items() if nodeinfo[0] == 0]\n\n # step 3 - repeatedly emit a root and remove it from the graph. Removing\n # a node may convert some of the node's direct children into roots.\n # Whenever that happens, we append the new roots to the list of\n # current roots.\n sorted_items = []\n while len(roots) != 0:\n # If len(roots) is always 1 when we get here, it means that\n # the input describes a complete ordering and there is only\n # one possible output.\n # When len(roots) > 1, we can choose any root to send to the\n # output; this freedom represents the multiple complete orderings\n # that satisfy the input restrictions. We arbitrarily take one of\n # the roots using pop(). 
Note that for the algorithm to be efficient,\n # this operation must be done in O(1) time.\n root = roots.pop()\n sorted_items.append(root)\n for child in graph[root][1:]:\n graph[child][0] = graph[child][0] - 1\n if graph[child][0] == 0:\n roots.append(child)\n del graph[root]\n if len(graph.items()) != 0:\n # There is a loop in the input.\n return None\n return sorted_items", "def topologicalSort(graph):\n vertexState = dict.fromkeys(graph.keys(), DFS.WHITE)\n topologicalSort = []\n def dfs(vertex):\n vertexState[vertex] = DFS.GRAY\n for neighbor in graph[vertex]:\n if vertexState[neighbor] == DFS.WHITE:\n dfs(neighbor)\n vertexState[vertex] = DFS.BLACK\n topologicalSort.append(vertex)\n for vertex in graph.keys():\n if vertexState[vertex] == DFS.WHITE:\n dfs(vertex)\n return topologicalSort", "def test_walk_graph(self):\n def assertDependencyWalk(target, results, postorder=False):\n targets = []\n self.build_graph.walk_transitive_dependency_graph([target.address],\n lambda x: targets.append(x),\n postorder=postorder)\n self.assertEquals(results, targets)\n\n def assertDependeeWalk(target, results, postorder=False):\n targets = []\n self.build_graph.walk_transitive_dependee_graph([target.address],\n lambda x: targets.append(x),\n postorder=postorder)\n self.assertEquals(results, targets)\n\n a = self.make_target('a')\n b = self.make_target('b', dependencies=[a])\n c = self.make_target('c', dependencies=[b])\n d = self.make_target('d', dependencies=[c, a])\n e = self.make_target('e', dependencies=[d])\n\n assertDependencyWalk(a, [a])\n assertDependencyWalk(b, [b, a])\n assertDependencyWalk(c, [c, b, a])\n assertDependencyWalk(d, [d, c, b, a])\n assertDependencyWalk(e, [e, d, c, b, a])\n\n assertDependeeWalk(a, [a, b, c, d, e])\n assertDependeeWalk(b, [b, c, d, e])\n assertDependeeWalk(c, [c, d, e])\n assertDependeeWalk(d, [d, e])\n assertDependeeWalk(e, [e])\n\n assertDependencyWalk(a, [a], postorder=True)\n assertDependencyWalk(b, [a, b], postorder=True)\n assertDependencyWalk(c, [a, b, c], postorder=True)\n assertDependencyWalk(d, [a, b, c, d], postorder=True)\n assertDependencyWalk(e, [a, b, c, d, e], postorder=True)\n\n assertDependeeWalk(a, [e, d, c, b, a], postorder=True)\n assertDependeeWalk(b, [e, d, c, b], postorder=True)\n assertDependeeWalk(c, [e, d, c], postorder=True)\n assertDependeeWalk(d, [e, d], postorder=True)\n assertDependeeWalk(e, [e], postorder=True)\n\n #Try a case where postorder traversal is not identical to reversed preorder traversal\n c = self.make_target('c1', dependencies=[])\n d = self.make_target('d1', dependencies=[c])\n b = self.make_target('b1', dependencies=[c, d])\n e = self.make_target('e1', dependencies=[b])\n a = self.make_target('a1', dependencies=[b, e])\n\n assertDependencyWalk(a, [a, b, c, d, e])\n assertDependencyWalk(a, [c, d, b, e, a], postorder=True)", "def test_stable_ordering(self):\n with Graph('g') as graph:\n a = ParrotNode(['a'])\n p = a | pike.merge()\n b = ParrotNode(['b'])\n graph.source | b | p\n # Make sure that b runs before a\n if graph.nodes.index(b) > graph.nodes.index(a):\n graph.nodes.remove(b)\n graph.nodes.insert(graph.nodes.index(a), b)\n ret = graph.run()\n self.assertEqual(list(ret['default']), ['a', 'b'])", "def traverse_up(node, node_callback):\n node_callback(node)\n for e in node.edges_out:\n traverse_up(e.dst, node_callback)", "def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n 
remaining_nodes.append(nd)\n return sorted_part, remaining_nodes", "def topology_complete(self):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def toposorted(infos):\n key_to_info = {}\n depends = {}\n for info in infos:\n key_to_info[info.key] = info\n depends[info.key] = []\n for info in infos:\n for after in info.after:\n after_info = key_to_info[after]\n depends[info.key].append(after_info)\n for before in info.before:\n before_info = key_to_info[before]\n depends[before_info.key].append(info)\n return topological_sort(infos, lambda info: depends[info.key])", "def sift_down(array, start, end):\n root = start\n while root*2+1 <= end:\n child = root*2+1\n swap = root\n if array[swap] < array[child]:\n swap = child\n if child+1 <= end and array[swap] < array[child+1]:\n swap = child+1\n if swap != root:\n array[root], array[swap] = array[swap], array[root]\n root = swap\n else:\n break", "def _drop_ground_node(self):\n self.laplace = np.concatenate((\n np.concatenate((\n self.laplace[:self.ground_node,:self.ground_node],\n self.laplace[:self.ground_node,self.ground_node+1:]),1),\n\n np.concatenate((\n self.laplace[self.ground_node+1:,:self.ground_node],\n self.laplace[self.ground_node+1:,self.ground_node+1:]), 1)))\n\n self.degree = np.concatenate((\n self.degree[:self.ground_node], \n self.degree[self.ground_node+1:]))", "def recoverTree(self, root: TreeNode) -> None:\n nodes = []\n self.dfs(root, nodes)\n tmp = []\n for i in range(len(nodes) - 1):\n if nodes[i].val > nodes[i+1].val:\n tmp.append(nodes[i])\n tmp.append(nodes[i+1])\n\n if len(tmp) == 2:\n tmp[0].val, tmp[1].val = tmp[1].val, tmp[0].val\n elif len(tmp) == 4:\n tmp[0].val, tmp[3].val = tmp[3].val, tmp[0].val", "def _upheap(self, node):\n parent = self.parent(node)\n while parent is not None and node.element() < parent.element():\n self._swap(node, parent) # Move node upward while key\n parent = self.parent(node) # smaller than parent's key", "def getDownstream(node, distance, pInteractions):\n seenNodes = set([node])\n borderNodes = [node]\n frontierNodes = []\n for dist in range(distance):\n while len(borderNodes) > 0:\n currNode = borderNodes.pop()\n if currNode in pInteractions:\n for i in pInteractions[currNode].keys():\n if i not in seenNodes:\n seenNodes.update([i])\n frontierNodes.append(i)\n borderNodes = deepcopy(frontierNodes)\n frontierNodes = list()\n return(seenNodes)", "def djikstra(nodes,links,source,dest):\n route = []\n vertexes = []\n for v in nodes:\n v.set_dist(float(\"inf\"))\n v.set_prev(None)\n heappush(vertexes, v)\n source.set_dist(0)\n heapify(vertexes)\n while vertexes:\n unsorted = False\n u = heappop(vertexes)\n if u == dest:\n break #because we found the destination no need to look further\n for v in u.get_links():\n if v.get_enabled():\n alt = u.get_dist() + 1\n target = v.get_target()\n if alt < target.get_dist():\n target.set_dist(alt)\n target.set_prev(u)\n unsorted = True #just a variable that help check if changes were made to the objects inside the heap\n if unsorted: #because i updated the variables but the heap wasn't maintained, i just heapify it again\n heapify(vertexes) \n #this is the part that saves the distance and route \n if dest.get_dist() == float(\"inf\"): #if there is no route then we just return None\n return None\n u = dest\n while u.get_prev() != None:\n v = u.get_prev()\n route.insert(0, v.get_specific_link(u)) \n u 
= v\n return route", "def test_4(self):\r\n r1, r2, r3, r4 = MyVariable(1), MyVariable(2), MyVariable(3), MyVariable(4)\r\n o0 = MyOp.make_node(r1, r2)\r\n o1 = MyOp.make_node(o0.outputs[0], r1)\r\n all = io_toposort([r1, o0.outputs[0]], [o0.outputs[0], o1.outputs[0]])\r\n assert all == [o1]", "def sort(self):\n def siftDown(start, count):\n \"\"\"\n This method tries to swap down the children's of the branch\n given by index 'start', making the lowest.\n \"\"\"\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return\n\n start = self.size / 2 - 1\n end = self.size - 1\n\n # Is this really necessary? If the structure is already ordered by \"heap-way\"...\n while start >= 0:\n # This is necessary to verify that we end-up with a correct min-heap structure,\n # because we can sort the structure at any time and end up with a max-heap.\n siftDown(start, self.size)\n start -= 1\n \n while end > 0:\n # With a 'min-heap' structure, it only takes swapping the first and the\n # \"last\" element in the heap to order it, and then reorder the heap\n # from the beginning to the \"end\"\n self.data[end], self.data[0] = self.data[0], self.data[end]\n siftDown(0, end)\n end -= 1", "def filter_graph(self, sorted_node, ploidy):\n \n for node in sorted_node:\n \n # while number of prefix edge > ploidy level\n while len(self.prefix[node]) > ploidy:\n min_weight_node = min(self.prefix[node], key=self.prefix[node].get)\n self.remove_edge(min_weight_node, node)\n \n # while number of suffix edge > ploidy level\n while len(self.suffix[node]) > ploidy:\n min_weight_node = min(self.suffix[node], key=self.suffix[node].get)\n self.remove_edge(node, min_weight_node)\n \n print(\"Graph is reduced to best overlap graph.\")", "def BidirectionalSearch(start_node, end_node, goal_state, improved_descendants = False):\t\n\tqueue_down = [start_node]\n\tqueue_up = [end_node]\n\n\tvisited_nodes_down = set()\n\tvisited_nodes_up = set()\n\n\tnumber_nodes_expanded = 0\n\tnumber_nodes_visited = 0\n\n\tchild_nodes_down = []\n\tchild_nodes_up = []\n\n\thash_value_down = {}\n\thash_value_up = {}\n\n\tt0 = time.time()\n\t\n\twhile len(queue_down) > 0 or len(queue_up) > 0:\n\t\ttop_expanded = False\n\t\tbottom_expanded = False\n\n\t\t#if the search down still has nodes to expand\n\t\tif len(queue_down) > 0:\n\t\t\tnode_down = queue_down.pop(0)\n\t\t\tbottom_expanded = True\n\t\t\tnumber_nodes_visited += 1\n\t\t\tnode_down.count = number_nodes_visited\n\t\t\n\t\t#if the search up still has nodes to expand\n\t\tif len(queue_up) > 0:\n\t\t\tnode_up = queue_up.pop(0)\n\t\t\ttop_expanded = True\n\t\t\tnumber_nodes_visited += 1\n\t\t\tnode_up.count = number_nodes_visited\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\treturn False\n\n\t\tif bottom_expanded:\n\t\t\tnode_down_hash = node_down.build_hash()\n\n\t\t\tif node_down_hash not in visited_nodes_down:\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tvisited_nodes_down.add(node_down_hash)\n\t\t\t\thash_value_down[node_down_hash] = node_down\n\t\t\t\tchild_nodes_down = 
node_down.successors(improved_descendants)\n\n\t\t\t\tfor i in range(len(child_nodes_down)):\n\t\t\t\t\tqueue_down.append(child_nodes_down[i])\n\t\t\telse:\n\t\t\t\tchild_nodes_down = []\n\n\t\tif top_expanded:\n\t\t\tnode_up_hash = node_up.build_hash()\n\t\t\tif node_up_hash not in visited_nodes_up:\n\t\t\t\tvisited_nodes_up.add(node_up_hash)\n\t\t\t\thash_value_up[node_up_hash] = node_up\n\n\t\t\t\tnumber_nodes_expanded += 1\n\t\t\t\tchild_nodes_up = node_up.successors(improved_descendants)\n\t\t\t\n\t\t\t\tfor i in range(len(child_nodes_up)):\n\t\t\t\t\tqueue_up.append(child_nodes_up[i])\n\t\t\telse:\n\t\t\t\tchild_nodes_up = []\n\n\t\t#The node expanded on the search down was already expanded in the search up or vice-versa\n\t\tif bottom_expanded and (node_down_hash in visited_nodes_up):\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\tdepth_found = print_solution(node_down, number_nodes_expanded, goal_state, hash_value_up[node_down_hash])\n\t\t\treturn True\n\t\telif top_expanded and (node_up_hash in visited_nodes_down):\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\tdepth_found = print_solution(hash_value_down[node_up_hash], number_nodes_expanded, goal_state, node_up)\n\t\t\treturn True\n\t\t\t\t\n\treturn False", "def _dfs_postorder(node, visited):\n\t# print(\"_dfs_postorder\")\n\tif node.lo is not None:\n\t\tyield from _dfs_postorder(node.lo, visited)\n\tif node.hi is not None:\n\t\tyield from _dfs_postorder(node.hi, visited)\n\tif node not in visited:\n\t\tvisited.add(node)\n\t\tyield node", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order", "def _backward(T, edge_to_P, root, root_prior_distn, node_to_data_fset):\n v_to_subtree_partial_likelihoods = {}\n for v in nx.topological_sort(T, [root], reverse=True):\n fset_data = node_to_data_fset[v]\n if T and T[v]:\n cs = T[v]\n else:\n cs = set()\n if cs:\n partial_likelihoods = {}\n for s in fset_data:\n prob = _get_partial_likelihood(edge_to_P,\n v_to_subtree_partial_likelihoods, v, cs, s)\n if prob is not None:\n partial_likelihoods[s] = prob\n else:\n partial_likelihoods = dict((s, 1) for s in fset_data)\n if v == root:\n pnext = {}\n for s in set(partial_likelihoods) & set(root_prior_distn):\n pnext[s] = partial_likelihoods[s] * root_prior_distn[s]\n partial_likelihoods = pnext\n v_to_subtree_partial_likelihoods[v] = partial_likelihoods\n return v_to_subtree_partial_likelihoods", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for 
next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)", "def _up_heap(self, j):\n p = self._parent(j)\n if j > 0 and self._data[j] < self._data[p]:\n self._swap(j, p)\n self._up_heap(p)", "def test_sort_chain_multiple_structure_decreasing():\n n = 14\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(n):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with values decreasing\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with values decreasing\".format(n)", "def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])", "def topological_sort_helper(self, node, visited, sorted_node):\n \n # add the current node as visited. \n visited.add(node)\n\n # repeat for all suffixes of the node\n for i in self.suffix[node]: \n if i not in visited: \n self.topological_sort_helper(i, visited, sorted_node) \n\n # push current node as the first value in list \n sorted_node.insert(0,node) \n return visited", "def move_up(g,k): # g: graph; k: coefficient\n for i,_ in path(g): #i: node address\n if (i%k)!=0:\n move_up_node(g,i,k)", "def uninformed_search(start, end, graph):\n\n class SearchNode():\n def __init__(self, step_cost, name, predecessor):\n self.path_cost = predecessor.path_cost + step_cost if predecessor is not None else 0\n self.step_cost = step_cost\n self.name = name\n self.predecessor = predecessor\n def __repr__(self):\n return self.predecessor.name + \"->\" + self.name + \"=\" + self.path_cost\n\n class Problem():\n def __init__(self, start, end, graph, goal_predicate):\n self.start = start\n self.end = end\n self.graph = graph\n self.is_goal = goal_predicate\n self.visited_nodes = []\n\n nodes_expanded = 0\n nodes_generated = 0\n max_nodes_in_memory = 0\n\n def tree_search(problem, fringe):\n nonlocal nodes_generated\n nonlocal nodes_expanded\n nonlocal max_nodes_in_memory\n\n # create the initial node\n nodes_generated = 1\n fringe = [SearchNode(0, problem.start, None)]\n\n while len(fringe) > 0:\n # keep track of some metrics\n max_nodes_in_memory = max(max_nodes_in_memory, len(fringe))\n nodes_expanded += 1\n\n node = fringe.pop(0)\n while node.name in problem.visited_nodes:\n # ran out of nodes in the fringe\n if len(fringe) == 0:\n return None\n\n node = fringe.pop(0)\n\n if problem.is_goal(node):\n return node\n \n # make sure we never visit this node again, since we'll be expanding it\n problem.visited_nodes.append(node.name)\n\n # keep the fringe sorted by the path cost\n fringe.extend(expand(node, problem))\n fringe = sorted(\n fringe, \n key=lambda node: node.path_cost\n )\n\n return None\n\n def expand(node, problem):\n nonlocal nodes_generated\n nodes = []\n for edge in problem.graph.edges(node.name):\n nodes.append(SearchNode(edge.weight, edge.destination, node))\n \n nodes_generated += len(nodes)\n return nodes\n\n initial_problem = Problem(start, end, graph, lambda x: x.name == end)\n result = tree_search(initial_problem, [])\n\n # convert the resulting nested structure into an actual path of (start, end, cost)\n def walk(node):\n pred = node.predecessor\n if 
pred is None:\n return []\n \n path = walk(pred)\n path.append((pred.name, node.name, node.step_cost))\n return path\n\n path = walk(result) if result is not None else None\n return (path, nodes_expanded, nodes_generated, max_nodes_in_memory)", "def _insort(self, node):\n lo = 0\n hi = len(self._pool)\n while lo < hi:\n mid = (lo+hi)//2\n if node.getFScore() < self._pool[mid].getFScore(): hi = mid\n else: lo = mid + 1\n self._pool.insert(lo, node)", "def test_3(self):\r\n r1, r2, r3, r4 = MyVariable(1), MyVariable(2), MyVariable(3), MyVariable(4)\r\n o0 = MyOp.make_node(r1, r2)\r\n o1 = MyOp.make_node(r3, r4)\r\n all = io_toposort([r1, r2, r3, r4], o0.outputs + o1.outputs)\r\n assert all == [o1,o0]" ]
[ "0.6479977", "0.64079857", "0.63420755", "0.63285846", "0.62891173", "0.62466025", "0.6230972", "0.6165624", "0.61602473", "0.6127933", "0.6103531", "0.6076643", "0.60480684", "0.60202795", "0.5990248", "0.5957457", "0.59298575", "0.59276503", "0.592397", "0.59147286", "0.5845383", "0.58326817", "0.58326817", "0.58308834", "0.5830487", "0.5823839", "0.58169216", "0.581677", "0.5813266", "0.58003336", "0.57781535", "0.5759605", "0.5729258", "0.57233244", "0.5717099", "0.5710225", "0.57019717", "0.57019717", "0.56991553", "0.5697122", "0.5653029", "0.5651182", "0.56399715", "0.5631861", "0.5631422", "0.5600061", "0.55827713", "0.5580312", "0.5575509", "0.55669", "0.5533768", "0.55280846", "0.54846555", "0.54767627", "0.54671353", "0.5457072", "0.54554904", "0.54531443", "0.5452124", "0.5445291", "0.54371095", "0.5434044", "0.54160094", "0.54158837", "0.5404461", "0.54029626", "0.5401482", "0.5401482", "0.53928167", "0.5388901", "0.5375668", "0.5359674", "0.5354846", "0.5353263", "0.53504515", "0.53417856", "0.53386605", "0.53378284", "0.53284377", "0.531955", "0.5318442", "0.5317448", "0.5305688", "0.5304935", "0.53019524", "0.530167", "0.5288408", "0.5286916", "0.5286534", "0.52854496", "0.5272596", "0.5262659", "0.525909", "0.5257749", "0.5254186", "0.52437204", "0.52356774", "0.5223404", "0.52107584", "0.5210424" ]
0.7133982
0
make sure a given operation on the hypergraph is done
def assert_done(self, task):
    if task not in self.tasks_done:
        # do the task if not done
        method = getattr(self, task)
        method()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, node, operations, last_operation):\n if last_operation == NO_OPERATION:\n return 0\n return 1", "def do_operation(self):\n operation = self.inputs['operation']\n res = self.entity.do_operation(self.context, **self.inputs)\n if res:\n return self.RES_OK, \"Node operation '%s' succeeded.\" % operation\n else:\n return self.RES_ERROR, \"Node operation '%s' failed.\" % operation", "def run(self, in_op):\n raise NotImplementedError", "def randomly_apply_operation(operation, img, gt_boxes):\n return tf.cond(\n get_random_bool(),\n lambda: operation(img, gt_boxes),\n lambda: (img, gt_boxes)\n )", "def operation_pickling_check(instance, sim):\n pickling_check(instance)\n sim.operations += instance\n sim.run(0)\n pickling_check(instance)", "def summarize_operation(self, operation):\n print \"================ \"+ operation +\" ================\"\n sys.stdout.flush()", "def testOperation(self):\n gen = self.gen\n prof = self.profile\n\n # Try the direct evaluation\n gen.operation()\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n\n # Try evaluation through __call__\n gen(prof.x)\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n return", "def __call__(self, node, operations, last_operation):\n cost = 0\n # if MOVE_OPERATION in operations or MOVE_OPERATION == last_operation:\n # return NotImplemented\n if NO_OPERATION != last_operation:\n cost += 1\n try:\n if operations[-1] != NO_OPERATION:\n cost += 1\n except IndexError:\n pass\n return cost", "def fail_local_operation(operation, node, environment):\n run_operation(operation, node, environment, succeed=False)", "def _command_operation(self, operation):\r\n if self.current_number != '0' and\\\r\n (self.current_operation is not None) and\\\r\n self.storage_number != '0':\r\n # check if user already selected a operation\r\n # if it is, do the equal function once and display\r\n # the resolve\r\n self._command_equal()\r\n self.current_operation = operation\r\n else:\r\n # else storage the operation\r\n self.storage_number = self.current_number\r\n self.current_operation = operation\r\n self.current_number = '0'", "def testServeGraphOpInfoForOpWithNoConsumers(self):\n _generate_tfdbg_v2_data(self.logdir)\n run = self._getExactlyOneRun()\n # First, look up the graph_id and name of the Iendity op in the\n # unstack_and_sum() graph. 
The Identity op marks the return value of\n # the tf.function and hence has no consumer.\n response = self.server.get(\n _ROUTE_PREFIX + \"/graph_execution/digests?run=%s\" % run\n )\n data = json.loads(response.get_data())\n digests = data[\"graph_execution_digests\"]\n op_types = [digest[\"op_type\"] for digest in digests]\n add_index_0 = op_types.index(\"AddV2\")\n graph_id = digests[add_index_0][\"graph_id\"]\n # Actually query the /graphs/op_info route.\n response = self.server.get(\n _ROUTE_PREFIX\n + \"/graphs/op_info?run=%s&graph_id=%s&op_name=%s\"\n % (run, graph_id, \"Identity\")\n )\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.get_data())\n\n # Check op's self properties.\n self.assertEqual(data[\"op_type\"], \"Identity\")\n self.assertEqual(data[\"op_name\"], \"Identity\")\n # TODO(cais): Assert on detailed device name when available.\n self.assertIn(\"device_name\", data)\n # The op is inside a nested tf.function, so its graph stack must have a height > 1.\n self.assertGreater(len(data[\"graph_ids\"]), 1)\n self.assertEqual(data[\"graph_ids\"][-1], graph_id)\n self.assertNotIn(\"input_names\", data)\n self.assertEqual(data[\"num_outputs\"], 1)\n self.assertEqual(data[\"host_name\"], _HOST_NAME)\n self.assertTrue(data[\"stack_frame_ids\"])\n\n # Check input op properties.\n self.assertLen(data[\"inputs\"], 1)\n self.assertTrue(data[\"inputs\"][0][\"op_name\"])\n self.assertIsInstance(data[\"inputs\"][0][\"op_name\"], str)\n self.assertEqual(data[\"inputs\"][0][\"output_slot\"], 0)\n input0 = data[\"inputs\"][0][\"data\"]\n self.assertEqual(input0[\"op_type\"], \"AddV2\")\n\n # Check consumers: There should be no consumers for this Identity op.\n self.assertEqual(data[\"consumers\"], [[]])", "async def ensure_contains(self, operation: Operation):\n # Check that our network contains the operation\n if not await self.contains(operation):\n if not await self.instantiable(operation):\n raise OperationImplementationNotInstantiable(operation.name)\n else:\n raise OperationImplementationNotInstantiated(\n operation.instance_name\n )", "def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)", "def wait_for_global_operation(self, operation):\n print('Waiting for %s.' 
% (operation))\n while True:\n result = self.compute.globalOperations().get(\n project=self.project,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Global operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)", "def clean(self):\n self.sess.run(self.init_op)\n print(\"Clean the running state of graph!\")", "def _execute(self, op, time):\n raise NotImplementedError", "def testServeGraphOpInfoForOpWithNoInputs(self):\n _generate_tfdbg_v2_data(self.logdir)\n run = self._getExactlyOneRun()\n response = self.server.get(\n _ROUTE_PREFIX + \"/graph_execution/digests?run=%s\" % run\n )\n data = json.loads(response.get_data())\n digests = data[\"graph_execution_digests\"]\n op_types = [digest[\"op_type\"] for digest in digests]\n graph_ids = [digest[\"graph_id\"] for digest in digests]\n placeholder_op_index = op_types.index(\"Placeholder\")\n op_name = digests[placeholder_op_index][\"op_name\"]\n graph_id = digests[placeholder_op_index][\"graph_id\"]\n # Actually query the /graphs/op_info route.\n response = self.server.get(\n _ROUTE_PREFIX\n + \"/graphs/op_info?run=%s&graph_id=%s&op_name=%s\"\n % (run, graph_id, op_name)\n )\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.get_data())\n\n # Check op's self properties.\n self.assertEqual(data[\"op_type\"], \"Placeholder\")\n self.assertTrue(data[\"op_name\"])\n # TODO(cais): Assert on detailed device name when available.\n self.assertIn(\"device_name\", data)\n # The op is inside a nested tf.function, so its graph stack must have a height > 1.\n self.assertNotIn(\"graph_id\", data)\n self.assertGreater(len(data[\"graph_ids\"]), 1)\n self.assertEqual(data[\"graph_ids\"][-1], graph_id)\n self.assertNotIn(\"input_names\", data)\n self.assertEqual(data[\"num_outputs\"], 1)\n self.assertEqual(data[\"host_name\"], _HOST_NAME)\n self.assertTrue(data[\"stack_frame_ids\"])\n\n # Check input op properties: The Placeholder has no inputs.\n self.assertEqual(data[\"inputs\"], [])\n\n # Check consumers.\n self.assertLen(data[\"consumers\"], 1)\n self.assertEmpty(data[\"consumers\"][0])", "def testoptdone(self):\r\n assert self.data.optdone\r\n target_e, target_g, target_s = self.data.geotargets\r\n value_e, value_g, value_s = self.data.geovalues[-1]\r\n converged = (value_e < target_e and value_g < target_g) or (value_g < target_g and value_s < target_s)\r\n assert converged", "def isOp(self):\n return True", "def is_action_applied(instance: Algorithm) -> bool:\n if len(get_results(instance)) == 0:\n return False\n return True", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def operation(self):\n pass", "def test_no_op(self, graph_entry_class):\n graph_entry_class.return_value.state = \"no-op\"\n graph_entry_class.return_value.path = \"foo/app1\"\n graph_entry_class.return_value.execute.return_value = (0, ['Success'], True)\n\n graph = ApplyGraph('plan', self.graph, [], \"bar\")\n\n graph.execute_graph()\n graph.execute_post_graph()\n\n expected_not_applied = {'foo/app1'}\n self.assertEqual(\n graph_entry_class.return_value.execute.mock_calls,\n []\n )\n self.assertEqual(graph.not_applied, expected_not_applied)", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n 
reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def next_operation(self):\n raise NotImplementedError", "def finish_training(self, error: bool = False, **info):\n pass", "def test_operation_pack() -> None:\n opt = options()\n server = MockServer()\n db, store = server.new_connection()\n\n a = _graph.constant_artefact(db, store, b\"bla bla\")\n b = _graph.constant_artefact(db, store, b\"bla bla bla\")\n fun = f.Funsie(\n how=f.FunsieHow.shell,\n what=\"cat infile\",\n inp={\"infile\": Encoding.blob},\n out={\"out\": Encoding.json},\n extra={},\n )\n op = _graph.make_op(db, fun, {\"infile\": a}, opt)\n op2 = _graph.Operation.grab(db, op.hash)\n assert op == op2\n\n with pytest.raises(AttributeError):\n op = _graph.make_op(db, fun, {}, opt)\n\n with pytest.raises(AttributeError):\n # no inputs\n op = _graph.make_op(db, fun, {}, opt)\n\n with pytest.raises(AttributeError):\n # too many inputs\n op = _graph.make_op(db, fun, {\"infile\": a, \"infile2\": b}, opt)\n\n with pytest.raises(RuntimeError):\n op = _graph.Operation.grab(db, hash_t(\"b\"))", "async def run_no_retry(\n self,\n ctx: BaseInputSetContext,\n octx: BaseOrchestratorContext,\n operation: Operation,\n inputs: Dict[str, Any],\n ) -> Union[bool, Dict[str, Any]]:\n # Check that our network contains the operation\n await self.ensure_contains(operation)\n # Create an opimp context and run the operation\n async with self.operations[operation.instance_name](\n ctx, octx\n ) as opctx:\n self.logger.debug(\"---\")\n self.logger.debug(\n \"Stage: %s: %s\",\n operation.stage.value.upper(),\n operation.instance_name,\n )\n str_inputs = str(inputs)\n self.logger.debug(\n \"Inputs: %s\",\n str_inputs\n if len(str_inputs) < 512\n else (str_inputs[:512] + \"...\"),\n )\n self.logger.debug(\n \"Conditions: %s\",\n dict(\n zip(\n map(\n lambda condition: condition.name,\n operation.conditions,\n ),\n ([True] * len(operation.conditions)),\n )\n ),\n )\n outputs = await opctx.run(inputs)\n str_outputs = str(outputs)\n self.logger.debug(\n \"Outputs: %s\",\n str_outputs\n if len(str_outputs) < 512\n else (str_outputs[:512] + \"...\"),\n )\n self.logger.debug(\"---\")\n return outputs", "def do_workload(self):\n pass", "def _legal_operations(self, model, tabu_list=[], max_indegree=None):\n\n local_score = self.scoring_method.local_score\n nodes = model.event_nodes\n potential_new_edges = (set(permutations(nodes, 2)) -\n set(model.edges()) -\n set([(Y, X) for (X, Y) in model.edges()]) -\n set([X for X, Y in model.relation_map.items() if len(Y) == 0]))\n\n for (X, Y) in potential_new_edges: # (1) add single edge\n if nx.is_directed_acyclic_graph(nx.DiGraph(list(model.edges()) + [(X, Y)])):\n if self.valid_temporal_relations([(X, Y)], model):\n operation = ('+', (X, Y))\n if operation not in tabu_list:\n old_parents = list(model.get_parents(Y))\n new_parents = old_parents + [X]\n if max_indegree is None or len(new_parents) <= max_indegree:\n temporal_node_parents = [X, Y]\n temporal_node = ITBN.temporal_node_marker + X + \"_\" + Y\n score_delta = (local_score(Y, new_parents) -\n local_score(Y, old_parents) +\n local_score(temporal_node, temporal_node_parents))\n yield(operation, score_delta)\n\n for (X, Y) in model.event_edges(): # (2) remove single edge\n operation = ('-', (X, Y))\n if operation not in tabu_list:\n old_parents = list(model.get_parents(Y))\n new_parents = old_parents[:]\n new_parents.remove(X)\n temporal_node_parents = [X, 
Y]\n temporal_node = ITBN.temporal_node_marker + X + \"_\" + Y\n score_delta = (local_score(Y, new_parents) -\n local_score(Y, old_parents) -\n local_score(temporal_node, temporal_node_parents))\n yield(operation, score_delta)\n\n for (X, Y) in model.event_edges(): # (3) flip single edge\n if len(model.relation_map[(Y, X)]) > 0:\n new_edges = list(model.edges()) + [(Y, X)]\n new_edges.remove((X, Y))\n if nx.is_directed_acyclic_graph(nx.DiGraph(new_edges)):\n if self.valid_temporal_relations([(X, Y)], model):\n operation = ('flip', (X, Y))\n if operation not in tabu_list and ('flip', (Y, X)) not in tabu_list:\n old_X_parents = list(model.get_parents(X))\n old_Y_parents = list(model.get_parents(Y))\n new_X_parents = old_X_parents + [Y]\n new_Y_parents = old_Y_parents[:]\n new_Y_parents.remove(X)\n if max_indegree is None or len(new_X_parents) <= max_indegree:\n temporal_node_parents = [Y, X]\n temporal_node = ITBN.temporal_node_marker + Y + \"_\" + X\n old_temp_node_parents = [X, Y]\n old_temp_node = ITBN.temporal_node_marker + X + \"_\" + Y\n score_delta = (local_score(X, new_X_parents) +\n local_score(Y, new_Y_parents) -\n local_score(X, old_X_parents) -\n local_score(Y, old_Y_parents) +\n local_score(temporal_node, temporal_node_parents) -\n local_score(old_temp_node, old_temp_node_parents))\n yield(operation, score_delta)", "def wait_for_operation(\n self,\n operation: dict,\n max_polls: int = MAX_POLLS,\n poll_interval: int = POLL_INTERVAL,\n ) -> dict:\n return None", "def test_removes_empty_subgraph(self):\n ctx = MockWorkflowContext()\n g = TaskDependencyGraph(ctx)\n\n # sg1 is just empty, no tasks inside it\n sg1 = g.subgraph(ctx)\n # sg2 contains only a NOPTask\n sg2 = g.subgraph(ctx)\n sg2.add_task(tasks.NOPLocalWorkflowTask(ctx))\n\n # sg3 contains sg4, which is empty behcause it only contains a NOPTask\n sg3 = g.subgraph(ctx)\n sg4 = g.subgraph(ctx)\n sg4.add_task(tasks.NOPLocalWorkflowTask(ctx))\n sg3.add_task(sg4)\n\n # sg5 is a subgraph that contains a real task! it is not removed\n sg5 = g.subgraph(ctx)\n real_task = tasks.WorkflowTask(ctx)\n sg5.add_task(real_task)\n\n assert set(g.tasks) > {sg1, sg2, sg3, sg4, sg5, real_task}\n g.optimize()\n assert set(g.tasks) == {sg5, real_task}", "def verify_operation(function):\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n if PartialState().distributed_type == DistributedType.NO or not PartialState().debug:\n return function(*args, **kwargs)\n operation = f\"{function.__module__}.{function.__name__}\"\n if \"tensor\" in kwargs:\n tensor = kwargs[\"tensor\"]\n else:\n tensor = args[0]\n shapes = get_shape(tensor)\n output = gather_object([shapes])\n if output[0] is not None:\n are_same = output.count(output[0]) == len(output)\n if not are_same:\n process_shape_str = \"\\n - \".join([f\"Process {i}: {shape}\" for i, shape in enumerate(output)])\n raise DistributedOperationException(\n f\"Cannot apply desired operation due to shape mismatches. 
\"\n \"All shapes across devices must be valid.\"\n f\"\\n\\nOperation: `{operation}`\\nInput shapes:\\n - {process_shape_str}\"\n )\n return function(*args, **kwargs)\n\n return wrapper", "def run(self, in_op):\n self.move_inner_state(in_op)\n if isinstance(in_op, memops.ReorderBase):\n self.substitute_reorder(in_op)\n elif isinstance(in_op, memops.FlushBase):\n self.flush_stores(in_op)\n elif isinstance(in_op, memops.Store):\n self._ops_list.append(in_op)\n elif isinstance(in_op, memops.Register_file):\n self.reg_file(in_op)\n\n return True", "def proceed(self):\n pass", "def on_operation(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None", "def fluidOperationCompletionCallback(request):\n\n global finished_operation, is_executing_operation\n print(\"Finished operation \" + request.operation)\n finished_operation = request.operation\n is_executing_operation = False\n return OperationCompletionResponse()", "def finish_learning(self):\n pass", "def done(self) -> bool:", "async def contains(self, operation: Operation) -> bool:\n return operation.instance_name in self.operations", "def local_operation(operation, node, environment):\n run_operation(operation, node, environment)", "def check(self, dgraph, **params):\n raise NotImplementedError", "def test_redeploy_edges(self):\n pass", "def testServeGraphOpInfoForOpWithInputsAndConsumers(self):\n _generate_tfdbg_v2_data(self.logdir)\n run = self._getExactlyOneRun()\n # First, look up the graph_id and name of the 1st AddV2 op.\n response = self.server.get(\n _ROUTE_PREFIX + \"/graph_execution/digests?run=%s\" % run\n )\n data = json.loads(response.get_data())\n digests = data[\"graph_execution_digests\"]\n op_types = [digest[\"op_type\"] for digest in digests]\n op_index = op_types.index(\"AddV2\")\n graph_id = digests[op_index][\"graph_id\"]\n op_name = digests[op_index][\"op_name\"]\n # Actually query the /graphs/op_info route.\n response = self.server.get(\n _ROUTE_PREFIX\n + \"/graphs/op_info?run=%s&graph_id=%s&op_name=%s\"\n % (run, graph_id, op_name)\n )\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.get_data())\n\n # Check op's self properties.\n self.assertEqual(data[\"op_type\"], \"AddV2\")\n self.assertEqual(data[\"op_name\"], digests[op_index][\"op_name\"])\n # TODO(cais): Assert on detailed device name when available.\n self.assertIn(\"device_name\", data)\n # The op is inside a nested tf.function, so its graph stack must have a\n # height > 1.\n self.assertGreater(len(data[\"graph_ids\"]), 1)\n # All graph_ids should be non-empty strings.\n self.assertTrue(all(data[\"graph_ids\"]))\n # All graph_ids should be unique (graph recursion is not currently\n # allowed in TF.)\n self.assertLen(set(data[\"graph_ids\"]), len(data[\"graph_ids\"]))\n self.assertNotIn(\"graph_id\", data)\n self.assertEqual(data[\"graph_ids\"][-1], digests[op_index][\"graph_id\"])\n self.assertNotIn(\"input_names\", data)\n self.assertEqual(data[\"num_outputs\"], 1)\n self.assertLen(data[\"output_tensor_ids\"], 1)\n self.assertIsInstance(data[\"output_tensor_ids\"][0], int)\n self.assertEqual(data[\"host_name\"], _HOST_NAME)\n self.assertTrue(data[\"stack_frame_ids\"])\n\n # Check input op properties.\n inputs = data[\"inputs\"]\n # The two input tensors to the AddV2 op are from the same Unpack\n # (unstack) op that provides 4 outputs.\n self.assertTrue(inputs[0][\"op_name\"])\n self.assertEqual(inputs[0][\"output_slot\"], 0)\n self.assertTrue(inputs[1][\"op_name\"])\n 
self.assertEqual(inputs[1][\"output_slot\"], 1)\n input0 = inputs[0][\"data\"]\n input1 = inputs[1][\"data\"]\n for inpt in (input0, input1):\n self.assertEqual(inpt[\"op_type\"], \"Unpack\")\n self.assertNotIn(\"input_names\", inpt)\n self.assertEqual(inpt[\"num_outputs\"], 4)\n self.assertLen(inpt[\"output_tensor_ids\"], 4)\n self.assertEqual(inpt[\"host_name\"], _HOST_NAME)\n self.assertEqual(inpt[\"graph_ids\"], data[\"graph_ids\"])\n self.assertLen(inpt[\"inputs\"], 1)\n self.assertTrue(inpt[\"inputs\"][0][\"op_name\"])\n self.assertIsInstance(inpt[\"inputs\"][0][\"op_name\"], str)\n self.assertEqual(inpt[\"inputs\"][0][\"output_slot\"], 0)\n self.assertNotIn(\"data\", inpt[\"inputs\"][0][\"op_name\"])\n self.assertLen(inpt[\"consumers\"], 4)\n self.assertLen(inpt[\"consumers\"][0], 1)\n self.assertEqual(inpt[\"consumers\"][0][0][\"input_slot\"], 0)\n self.assertNotIn(\"data\", inpt[\"consumers\"][0][0])\n self.assertLen(inpt[\"consumers\"][1], 1)\n self.assertEqual(inpt[\"consumers\"][1][0][\"input_slot\"], 1)\n self.assertNotIn(\"data\", inpt[\"consumers\"][1][0])\n self.assertLen(inpt[\"consumers\"][2], 1)\n self.assertEqual(inpt[\"consumers\"][2][0][\"input_slot\"], 1)\n self.assertNotIn(\"data\", inpt[\"consumers\"][2][0])\n self.assertLen(inpt[\"consumers\"][3], 1)\n self.assertEqual(inpt[\"consumers\"][3][0][\"input_slot\"], 1)\n self.assertNotIn(\"data\", inpt[\"consumers\"][3][0])\n\n # Check consuming op properties.\n self.assertLen(data[\"consumers\"], 1)\n self.assertLen(data[\"consumers\"][0], 1)\n # The AddV2 is consumed by another AddV2 op in the same graph.\n self.assertTrue(data[\"consumers\"][0][0][\"op_name\"])\n self.assertIsInstance(data[\"consumers\"][0][0][\"op_name\"], str)\n self.assertEqual(data[\"consumers\"][0][0][\"input_slot\"], 0)\n consumer = data[\"consumers\"][0][0][\"data\"]\n self.assertEqual(consumer[\"op_type\"], \"AddV2\")\n self.assertTrue(consumer[\"op_name\"])\n self.assertNotEqual(consumer[\"op_name\"], data[\"op_name\"])\n self.assertEqual(consumer[\"num_outputs\"], 1)\n self.assertLen(consumer[\"output_tensor_ids\"], 1)\n self.assertIsInstance(consumer[\"output_tensor_ids\"][0], int)\n self.assertEqual(consumer[\"host_name\"], _HOST_NAME)\n self.assertTrue(consumer[\"stack_frame_ids\"])\n self.assertLen(consumer[\"inputs\"], 2)\n self.assertEqual(consumer[\"inputs\"][0][\"op_name\"], data[\"op_name\"])\n self.assertEqual(consumer[\"inputs\"][0][\"output_slot\"], 0)\n self.assertNotIn(\"data\", consumer[\"inputs\"][0])\n self.assertEqual(consumer[\"inputs\"][1][\"output_slot\"], 2)\n self.assertNotIn(\"data\", consumer[\"inputs\"][1])\n self.assertLen(consumer[\"consumers\"], 1)\n self.assertLen(consumer[\"consumers\"][0], 1)\n self.assertTrue(consumer[\"consumers\"][0][0][\"op_name\"])\n self.assertIsInstance(consumer[\"consumers\"][0][0][\"op_name\"], str)\n self.assertEqual(consumer[\"consumers\"][0][0][\"input_slot\"], 0)\n self.assertNotIn(\"data\", consumer[\"consumers\"][0][0])", "def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False", "def is_runnable(graph, obj):\n connections_to_remove = []\n pred_errored = []\n is_done = None\n for pred in graph.predecessors[obj.name]:\n try:\n is_done = pred.done\n except ValueError:\n pred_errored.append(pred)\n\n if is_done is True:\n connections_to_remove.append(pred)\n elif is_done is False:\n return False\n\n if pred_errored:\n return 
pred_errored\n\n # removing nodes that are done from connections\n for nd in connections_to_remove:\n graph.remove_nodes_connections(nd)\n\n return True", "def _hasStatefulPartitionedCallOp(self, graph_def):\n for node in graph_def.node:\n if node.op == \"StatefulPartitionedCall\":\n return True\n return False", "def control_flow_op(op):\n return (control_flow_util.IsSwitch(op) or\n control_flow_util.IsMerge(op))", "def _compute_is_terminal(self):\n # self.n_actions contains a number of unlabelled datapoints that is left\n if self.n_actions==1:\n # print('We ran out of samples!')\n done = True\n else:\n done = False\n return done", "def testoptdone(self):\r\n assert self.data.optdone\r\n assert numpy.all(numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets)", "def testoptdone(self):\r\n assert self.data.optdone\r\n convergence = numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets\r\n assert sum(convergence) >= 2", "def testoptdone(self):\r\n\r\n assert self.data.optdone\r\n\r\n targets = self.data.geotargets\r\n values = numpy.abs(self.data.geovalues[-1])\r\n\r\n target_e = targets[0]\r\n target_g = targets[1:3]\r\n target_x = targets[3:]\r\n value_e = values[0]\r\n value_g = values[1:3]\r\n value_x = values[3:]\r\n\r\n conv_all = all(values < targets)\r\n conv_e = value_e < 25*target_e and all(value_g < target_g) and all(value_x < target_x)\r\n conv_g = value_e < target_e and all(value_g < target_g/3.0) and all(value_x < target_x*3.0)\r\n conv_x = value_e < target_e and all(value_g < target_g*3.0) and all(value_x < target_x/3.0)\r\n converged = conv_all or conv_e or conv_g or conv_x\r\n assert converged", "def process_next(cls):\n db = cls._core.get_db()\n configuration = cls._core.get_configuration()\n if os.path.exists(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\"):\n return False\n lockfile = open(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\",\"w\")\n lockfile.close()\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 \\\n WHERE OPE_ID IN ( \\\n SELECT OPE_ID FROM OPERATIONS \\\n WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \\\n AND OPE_INVOKED = ( \\\n SELECT MIN(OPE_INVOKED) FROM OPERATIONS \\\n WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \\\n ) ;\"\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;\"\n db.query(cls._core,stmnt_lock,commit=True)\n cur = db.query(cls._core,stmnt)\n res = cur.fetchallmap()\n if len(res) > 0:\n operation = cls.restore_operation(res[0])\n try:\n cls.process_children(operation)\n operation.do_workload()\n except Exception, e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(operation.get_id(),),commit=True)\n error = StringIO()\n print_exc(None,error)\n cls._core.log(error.getvalue())\n ret = True\n else:\n ret = False\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;\"\n db.query(cls._core,stmnt_delete,commit=True)\n db.commit()\n try:\n os.unlink(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\")\n except OSError,e :\n raise OperationException(OperationException.get_msg(0))\n return ret", "def _is_done(self):\n pass", "def wait_for_region_operation(self, operation):\n print('Waiting for %s.' 
%(operation))\n while True:\n result = self.compute.regionOperations().get(\n project=self.project,\n region=self.region,\n operation=operation).execute()\n if result['status'] == 'DONE':\n print(\"Done.\")\n if 'error' in result:\n print('Region operations error', result['error'])\n raise RegionOperationsError(result['error'])\n return result\n time.sleep(1)", "def _proceed(self):\n raise NotImplementedError", "def compute(self, pred, target):\n pass", "def test_graph_cant_delete_an_unpresent_node(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.del_nodes(3.14)", "def perform(self):\n pass", "def mark_cur_op_complete(self, cur_op: Callable) -> None:\n # torch.nn.Module __setattr__ has overhead,\n # this code is the explicit fast path for `self.idx += 1`\n object.__setattr__(self, 'idx', self.idx + 1)", "def __call__(self, tf_node, input_ops):\n op_name = tf_node.op\n\n # if op not handled, gets -1\n ng_op = getattr(self, op_name, None)\n\n if ng_op:\n return ng_op(tf_node, input_ops)\n else:\n # ignored op set to None\n print(tf_node.name, \"ignored.\")\n return None", "def run(graph, time_axis, initial, element2edge, var, element_component_clause_literal_node, ts, type_num,\n type_robot_label, buchi, show, last_subtask=None, loop=False):\n\n frontier = [[initial, -1, []]]\n # iterate until the accepting state is reached\n while True:\n if show:\n print([f[0] for f in frontier])\n node, clock, acpt_run_ = frontier.pop()\n\n # Determine the set of identical time instants\n instant_element = time_axis[clock + 1]\n if acpt_run_:\n pre_neg_edge = acpt_run_[-1]['neg_edge']\n else:\n pre_neg_edge = []\n # loop over each successor to see whether progress can be made\n for succ in graph.succ[node]:\n # equivalent subtask\n if graph.edges[element2edge[instant_element[1]]]['formula'] == graph.edges[(node, succ)]['formula'] and \\\n graph.nodes[element2edge[instant_element[1]][0]]['formula'] == graph.nodes[node]['formula']:\n # if isEquivalent(graph.edges[element2edge[instant_element[1]]]['formula'], graph.edges[(node, succ)]['formula']) and \\\n # isEquivalent(graph.nodes[element2edge[instant_element[1]][0]]['formula'], graph.nodes[node]['formula']):\n\n # print((node, succ), graph.edges[(node, succ)]['formula'])\n # whether the collection of paths at clock satisfies the edge label\n # neg_literal: negative clause that needs to be addressed\n # exe_robot: set of robots that takes the subtask with nonzero id\n\n essential_clause_edge, neg_clause_edge, exe_robots_edge \\\n = determine_essentials(instant_element, var, graph.edges[(node, succ)]['label'],\n graph.edges[(node, succ)]['neg_label'], 1,\n element_component_clause_literal_node, ts, type_num,\n type_robot_label, last_subtask, buchi, [], loop)\n\n essential_clause_vertex, neg_clause_vertex, exe_robots_vertex \\\n = determine_essentials(instant_element, var, graph.nodes[node]['label'],\n graph.nodes[node]['neg_label'], 0,\n element_component_clause_literal_node, ts, type_num, dict(),\n last_subtask, buchi,\n pre_neg_edge, loop)\n\n # clock, the exact time when transition occurs\n acpt_run = acpt_run_.copy() # copy the history\n acpt_run.append({'subtask': (node, succ), 'time_element': time_axis[clock + 1],\n 'essential_robot_edge': exe_robots_edge,\n 'essential_clause_edge': essential_clause_edge, 'neg_edge': neg_clause_edge,\n 'essential_robot_vertex': exe_robots_vertex,\n 'neg_vertex': neg_clause_vertex})\n\n # stop when accept is reached\n if 'accept' in succ:\n return acpt_run\n # clock + 1, after reaching succ, the 
immediate time clock that should be verified\n frontier.append([succ, clock + 1, acpt_run])", "def Main(operation, args):\n\n caller = args[0] # used with CheckWitness below to conform authorization\n caller_is_authorized = CheckWitness(caller) # Boolean\n\n if not caller_is_authorized:\n print('Action denied.')\n return False\n\n print('Action granted.')\n \n input_inheritage_datum = args[1]\n caller_with_input_will_or_inheritage_datum = concat(caller, input_inheritage_datum)\n\n # Set testator_or_heir to the optional third argument or use the caller\n if len(args) == 3:\n testator_or_heir = args[2]\n legal_entity_with_inheritage_datum = concat(testator_or_heir, input_inheritage_datum)\n else:\n testator_or_heir = caller\n legal_entity_with_inheritage_datum = caller_with_input_will_or_inheritage_datum\n\n\n if operation != None:\n\n\n if operation == 'RegisterWillOrInheritage': \n \"\"\"\n Register will or equity specification \n document to the contract caller.\n \"\"\"\n storage_occupying_name = Get(GetContext, input_inheritage_datum)\n\n if storage_occupying_name:\n print(storage_occupying_name)\n \n else:\n Put(GetContext, input_inheritage_datum, caller)\n \n print(\"Your will was successfully registered.\")\n \n return True\n\n\n if operation == 'SetInheritage': \n \"\"\"\n Set a testator_or_heir for a registered equity.\n \"\"\"\n if authorization_check(input_inheritage_datum):\n Put(GetContext, legal_entity_with_inheritage_datum, testator_or_heir)\n \n print(\"The inheritage was successfully set to legal entity.\")\n\n return True\n\n\n if operation == 'QueryInheritage':\n \"\"\"\n Quiery the legal testator_or_heir of an inheritage.\n \"\"\"\n legal_testator_or_heir = Get(GetContext, legal_entity_with_inheritage_datum)\n \n print(legal_testator_or_heir)\n\n if legal_testator_or_heir:\n return True\n\n\n if operation == 'CancelInheritage':\n if authorization_check(input_inheritage_datum):\n testator_or_heir_to_del = args[2]\n\n inheritage_to_be_removed = concat(testator_or_heir_to_del, input_inheritage_datum)\n Delete(GetContext, inheritage_to_be_removed)\n\n print(\"The inheritance was successfully removed from the will.\")\n\n return True\n\n\n if operation == 'ChangeInheritage':\n legal_entity = Get(GetContext, caller_with_input_will_or_inheritage_datum)\n\n if legal_entity:\n is_authorized_legal_entity = CheckWitness(legal_entity)\n\n if is_authorized_legal_entity:\n changed_testator_or_heir = args[2]\n changed_testator_with_input_inheritage_datum = concat(changed_testator_or_heir, input_inheritage_datum)\n Delete(GetContext, caller_with_input_will_or_inheritage_datum)\n Put(GetContext, changed_testator_with_input_inheritage_datum, changed_testator_or_heir)\n\n print(\"Your will has changed.\")\n\n return True\n\n\n return False", "def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. 
HIV transmission\n self.infection_operator.step()", "def __call__(self, node):\n return True;\n predcount = self.CountPred(node);\n if predcount == 0: return True;\n return len(node.predicates) != 0;", "def _step(self, action):\n\n # action is generated from the action_policy (external to the environment)\n if len(action) == 4:\n object_index, new_location, action_means, action_stds = action\n if len(action) == 2:\n \"\"\"\n Action is not generated from a Gaussian distribution\n \"\"\"\n object_index, new_location = action\n action_means = action_stds = None\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if len(self.action_storage) > 0:\n last_progress = self.action_storage[-1][4]\n else:\n last_progress = 0\n\n info = {}\n if self.e.act(object_index, Command(position, rotation)):\n # print ('Action accepted')\n cur_transform = self.e.objects[object_index].transform\n # I need to call self.action_storage.append before get_observation_and_progress\n self.action_storage.append( [object_index, prev_transform, cur_transform, None, None, True, action_means, action_stds] )\n observation, progress = self.get_observation_and_progress()\n self.action_storage[-1][3:5] = [observation, progress]\n\n info['action_accepted'] = True\n else:\n \"\"\"\n Action failed\n We can reduce the progress to avoid falling out of the table\n \"\"\"\n if len(self.action_storage) > 0:\n # Just return observation and progress of last action\n _, _, _, observation, progress, _, _, _ = self.action_storage[-1]\n progress -= self.config.failed_action_penalty\n else:\n # First action failed\n observation, _ = self.get_observation_and_progress()\n progress = -self.config.failed_action_penalty\n \n self.action_storage.append( [object_index, prev_transform, prev_transform, observation, progress, False, action_means, action_stds] )\n\n \n info['action_accepted'] = False\n\n # Typical threshold approach\n if progress > self.progress_threshold:\n # Finish action\n done = True\n else:\n done = False\n \n reward = progress - last_progress\n #print ('Progress = %.2f ; reward = %.2f' % (progress, reward))\n\n return (observation, reward, done, info)", "def check_nodes(self) -> bool:\n # check the input-output consistency\n for op_name in self.__ops:\n op = cast(Operator, self.__ops[op_name])\n inputs: Dict[str, Operator] = op.input_ops\n for i in inputs.values():\n if op not in i.output_op_list:\n return False\n\n return True", "def apply(self, fgraph):\r\n pass", "def action_done(self):\n pass", "def workflow_complete():\n\n if request.method == \"POST\":\n \"\"\"\n request looks like:\n {\n \"workflow_name\": \"test-workflow\",\n \"dataset_id\": \"HRI107\",\n \"operation\": \"std-dev\",\n \"PID\": 1\n \"other_cardinals\": [(2, \"23.45.67.89\"), (3, \"34.56.78.90\")],\n \"jiff_server\": \"45.67.89.01\"\n }\n \"\"\"\n\n req = request.get_json(force=True)\n\n pods = get_pod_by_workflow_and_pid(req[\"workflow_name\"], req[\"PID\"])\n if pods is not None:\n for pod in pods:\n delete_entry(pod)\n\n jiff_server = get_jiff_server_by_workflow(req[\"workflow_name\"])\n if jiff_server is not None:\n delete_entry(jiff_server)\n\n orch = Orchestrator(req, app, len(get_running_workflows()))\n\n orch.stop_workflow()\n\n app.logger.info(f\"Workflow {req['workflow_name']} complete, removed from running jobs.\")\n\n event_timestamps = get_pod_event_timestamp_by_workflow_and_pid(req['workflow_name'],req['PID'])\n if event_timestamps is not None:\n 
delete_entry(event_timestamps)\n\n event_timestamps_dict = {x.name: str(getattr(event_timestamps, x.name)) for x in event_timestamps.__table__.columns}\n\n pod_resource_usage = get_pod_resource_consumption_by_workflow_and_pid(req['workflow_name'],req['PID'])\n usage = {'cpu': {'avg': None, 'max': None}, 'memory': {'avg': None, 'max': None}}\n if pod_resource_usage is not None:\n cpu_consumptions = [obj.cpu_usage for obj in pod_resource_usage]\n memory_consumptions = [obj.memory_usage for obj in pod_resource_usage]\n\n if len(cpu_consumptions) > 0:\n usage['cpu'] = {\n 'avg': sum(cpu_consumptions) / len(cpu_consumptions),\n 'max': max(cpu_consumptions)\n }\n\n if len(memory_consumptions) > 0:\n usage['memory'] = {\n 'avg': sum(memory_consumptions) / len(memory_consumptions),\n 'max': max(memory_consumptions)\n }\n\n for obj in pod_resource_usage:\n delete_entry(obj)\n\n app.logger.info(\"ABOUT TO send pod stats\")\n orch.send_pod_stats(usage, event_timestamps_dict)\n response = {\n \"MSG\": \"OK\",\n \"timestamps\": event_timestamps_dict,\n \"resource_consumption\": usage\n }\n else:\n\n app.logger.error(\n f\"Received request indicating the workflow {req['workflow_name']} \"\n f\"completed, but this workflow is not present in running jobs\"\n f\"record. Nothing to do.\")\n response = {\n \"MSG\": f\"ERR: {req['workflow_name']} not in running jobs record.\"\n }\n\n return jsonify(response)", "def updateState(self):\n\n if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):\n self.step_ops['cutting'] = True\n \n if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):\n self.step_ops['cooking'] = True\n\n # TODO: add the rest of the operations\n\n advance = True\n\n # Check if ALL operations are complete\n for op in self.step_ops:\n if self.step_ops[op] == False:\n advance = False\n break\n\n if advance:\n self.nextStep()", "def check(iters):\n\treturn check_hypernet(iters[0]) and check_other(iters[1])", "def isdone(node_dict):\n # compute heading difference\n hdiff = heading_diff(r_target, node_dict['pn'].heading)\n # return if we are we close enough\n return abs(hdiff) < abs(tol)", "def test_restore_empty(self):\n assert self._restore_graph([]).tasks == []", "def test_graph_cant_delete_without_argument(graph_no_edges):\n with pytest.raises(TypeError):\n graph_no_edges.del_nodes()", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def act(self, action_values: Tensor) -> Tensor:\n ...", "def test_post_graph(self, graph_entry_class):\n graph_entry_class.return_value.state = \"no-op\"\n graph_entry_class.return_value.path = \"foo/app1\"\n graph_entry_class.return_value.execute.return_value = (0, ['Success'], True)\n\n graph = ApplyGraph('plan', self.graph, self.post_graph, \"bar\")\n\n graph.execute_graph()\n graph.execute_post_graph()\n\n self.assertEqual(\n graph_entry_class.return_value.execute.mock_calls,\n []\n )\n self.assertTrue(len(graph.not_applied) == 2)", "def execute(self, userdata):\n\n start_time = rospy.Time.now()\n self._robot.head.look_at_standing_person()\n operator = userdata.operator_learn_in\n while not operator:\n r = rospy.Rate(1.0)\n if self.preempt_requested():\n return 'aborted'\n\n if(rospy.Time.now() - 
start_time).to_sec() > self._operator_timeout:\n return 'failed'\n\n operator = self._robot.ed.get_closest_laser_entity(\n radius=0.5,\n center_point=VectorStamped.from_xyz(1, 0, 1, rospy.Time(), self._robot.base_link_frame))\n rospy.loginfo(\"Operator: {op}\".format(op=operator))\n if not operator:\n options = [\"Please stand in front of me.\",\n \"My laser can't see you, please get closer.\",\n \"Where are you? Please get closer.\"]\n sentence = random.choice(options)\n self._robot.speech.speak(sentence)\n else:\n self._robot.speech.speak(\"Please look at me while I learn to recognize you.\",\n block=False)\n self._robot.head.look_at_standing_person()\n learn_person_start_time = rospy.Time.now()\n detection_counter = 0\n while detection_counter < self._detection_threshold:\n if self._robot.perception.learn_person(self._operator_name):\n rospy.loginfo(\"Successfully detected you %i times\" % (detection_counter + 1))\n detection_counter += 1\n elif (rospy.Time.now() - learn_person_start_time).to_sec() > self._learn_person_timeout:\n self._robot.speech.speak(\"Please stand in front of me and look at me\")\n operator = None\n break\n r.sleep()\n rospy.loginfo(\"We have a new operator: %s\" % operator.uuid)\n self._robot.speech.speak(\"Who is that handsome person? Oh, it is you!\", mood='Excited')\n self._robot.speech.speak(\n \"I will follow you now, please say , {}, stop , when we are at the car.\".format(self._robot.robot_name))\n self._robot.head.close()\n userdata.operator_learn_out = operator\n return 'done'", "def perform(self):\n raise NotImplementedError", "def process_operation(self, resources, resource, api, operation, context):\n pass", "def check_if_can_evolve(self):\n # This sounds similar to generate actions\n pass", "def main():\n graph_alg_eq()\n graph_points()\n graph_smooth_from_pts()\n\n return GOOD_RET # success", "def perform_action(self, action):\r\n t_list = self.get_action_outcomes(self.current_state, action)\r\n new_state = t_list[np.argmax(np.random.multinomial(1, [t[0] for t in t_list]))][1]\r\n # print(len(self.trajectory), ':', self.current_state, '--', action ,'-->', new_state)\r\n self.current_state = new_state\r\n self.trajectory.append(new_state)\r\n return tuple(self.current_state) == tuple(self.end_state)", "def __call__(self, pred, target):\n return NotImplemented", "def _tensor_run_opt_ext(momentum, learning_rate, gradient, weight, accum, stat, opt):\n success = True\n success = F.depend(success, opt(weight, gradient, learning_rate, accum, momentum, stat))\n return success", "def test_global_efficiency_complete_graph(self):\n for n in range(2, 10):\n G = nx.complete_graph(n)\n assert_equal(nx.global_efficiency(G), 1)", "def opOk(op, validRegs):\n for operand in op.operands:\n if not operand in reversed(validRegs):\n return False\n # If we make it here, they're all valid\n return True", "def test_no_requirements(self):\n def f():\n pass\n self._run_as_operator(f)", "def operation_check(self, methodName, isSucceed):\n if (isSucceed):\n self.log4py.info(\"method 【\" + methodName + \"】 运行通过!\");\n else:\n self.log4py.error(\"method 【\" + methodName + \"】 运行失败!\");", "def checkDone(self):\n # checks if the instructions in the pipeline are nop are not\n # if all nop then we are done\n self.__done = True\n for pi in self.pipeline:\n if pi.instr is not None:\n self.__done = False", "def testNoCatchEagerOpExecution(self):\n check_numerics_callback.enable_check_numerics()\n x = constant_op.constant([2.0, 3.0])\n y = constant_op.constant([1.0, 0.0])\n 
self.assertAllClose((x + y) * (x - y), [3.0, 9.0])", "def _validate_tpu_training_graph():\n operations = ops.get_default_graph().get_operations()\n\n # Check if there is atleast one CrossReplicaSum operation in the graph\n # This should be introduced by using the CrossShardOptimizer wrapper\n cross_replica_sum_ops = [\n o for o in operations if o.type == _CROSS_REPLICA_SUM_OP\n ]\n if not cross_replica_sum_ops:\n raise ValueError(\n 'CrossShardOptimizer must be used for model training on TPUs.')", "def done(self):", "def done(self):", "def runOperation(operation, num1, num2):\n if operation == 1 or operation == '+':\n print(add(num1, num2))\n elif operation == 2 or operation == '-':\n print(sub(num1, num2))\n elif operation == 3 or operation == '*':\n print(mul(num1, num2))\n elif operation == 4 or operation == '/':\n print(div(num1, num2))\n else:\n print(\"I don't understand\")", "def initialized(self):\n return len(self.ops) > 0", "def test_invalid_tensor_op_object_graph_pairing(self, data, description):\n with self.assertRaises((KeyError, AssertionError, TypeError), msg=description):\n data()", "def op_in_graph(self, op):\n # pylint: disable=protected-access\n if op._graph == self:\n return True\n # pylint: enable=protected-access\n if self._parent_graph:\n return self._parent_graph.op_in_graph(op)\n return False", "def final_check(self, test_collection):\n assert True", "def action_done(self):" ]
[ "0.6206891", "0.59665424", "0.5583435", "0.5557241", "0.54633397", "0.54628277", "0.54410225", "0.5430571", "0.54086703", "0.5403374", "0.53677326", "0.5349503", "0.53490293", "0.53462434", "0.5256265", "0.5256106", "0.52509886", "0.5231918", "0.5228386", "0.5189172", "0.5187015", "0.51792276", "0.51732016", "0.51327825", "0.5129192", "0.51278", "0.512667", "0.51021963", "0.50945926", "0.5090032", "0.50826854", "0.5068264", "0.50593436", "0.5052689", "0.5044111", "0.5043429", "0.5043218", "0.50284994", "0.49953702", "0.4992666", "0.4981358", "0.49800006", "0.4979889", "0.4974791", "0.49636582", "0.49635708", "0.49621007", "0.49616507", "0.4959061", "0.49582872", "0.49551013", "0.49525738", "0.4949683", "0.4945991", "0.49457684", "0.49435696", "0.4943359", "0.49421608", "0.49277064", "0.49240166", "0.49161562", "0.49141893", "0.49116147", "0.49066046", "0.49063727", "0.49041146", "0.49040824", "0.4898718", "0.4897494", "0.48962158", "0.48959428", "0.4895384", "0.48953468", "0.4888485", "0.48875895", "0.4882298", "0.4882017", "0.48793542", "0.48733234", "0.48732048", "0.4867768", "0.48645645", "0.48641658", "0.48614457", "0.48609444", "0.4857529", "0.48567083", "0.48445943", "0.48419696", "0.4840089", "0.48366454", "0.48333174", "0.48296696", "0.4829061", "0.4829061", "0.4817146", "0.48118028", "0.48056397", "0.48041314", "0.48004797", "0.4800103" ]
0.0
-1
show the hypergraph as a pic
def show(self):
    f = open('/tmp/dotty', 'w')
    f.write(self.dot())
    f.close()
    os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif')
    os.system('eog /tmp/dotty.gif')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def plot_graph(self) -> None:", "def show_custom_graph(self):\n pass", "def show():\n\tplt.show()", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show_flow(filename):\n flow = read_flow(filename)\n img = flow_to_image(flow)\n plt.imshow(img)\n plt.show()", "def print_graph(dag, image_path, graph_path):\n for node in dag.nodes():\n dag.node[node]['label'] = node.label\n nx.write_graphml(dag, graph_path)\n pos = nx.random_layout(dag)\n nx.draw_networkx(dag, ax=None, width=3, pos=pos)\n p.savefig(image_path)", "def show(image,label,pred):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n s=\"True Label : \"+str(label)+\" Predicted label : \"+str(pred)\n pyplot.xlabel(s,fontname=\"Arial\", fontsize=20 )\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()", "def show(self):\n plt.show()", "def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it", "def show_graph(self):\n graph_file = self.dump_graph()\n subprocess.check_output(shlex.split(f'gwenview {graph_file}'))", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # 
fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def plot(self):\n\t\tself.plotOfIP().plot()", "def display(self):\n display(self.image)", "def _show(self, a):\n fig = plt.figure()\n fig.set_size_inches((2, 2))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n plt.set_cmap('hot')\n ax.imshow(a, aspect='equal')\n plt.show()", "def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)", "def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n 
ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def render_graph(self, view=True):\n\n log.debug(\"Rendering Graph to image file [%s]\" % 'NOT_IMPLEMENTED')\n\n if view:\n self.graph.view()\n else:\n self.graph.render()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def show():\n setup()\n plt.show()", "def show(self) -> None:\n cv.imshow(str(self.__class__), self.output_image)", "def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()", "def show_neighborhood(self, max_dist=3, detailed=True):\n dotstr = ''\n for node in self.neighbors(max_dist):\n if node is self:\n dotstr += node.dot(color='dodgerblue', detailed=detailed)\n else:\n dotstr += node.dot(detailed=detailed)\n dotstr = 'digraph hypergraph {\\nrankdir=BT\\n%s}\\n' % dotstr\n f = open('/tmp/dotty', 'w')\n f.write(dotstr)\n f.close()\n os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif')\n os.system('eog /tmp/dotty.gif')", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def display(self):\n options = {\"ent_only_plot\": True,\n \"rel_only_plot\": not self.config.plot_entity_only,\n \"ent_and_rel_plot\": not self.config.plot_entity_only}\n\n if self.config.plot_embedding:\n viz = Visualization(model=self.model, vis_opts = options)\n\n viz.plot_embedding(resultpath=self.config.figures, algos=self.model.model_name, show_label=False)\n\n if self.config.plot_training_result:\n viz = Visualization(model=self.model)\n viz.plot_train_result()\n\n if self.config.plot_testing_result:\n viz = Visualization(model=self.model)\n viz.plot_test_result()", "def show_figure(self):\n pylab.show()", "def visualize(self, paths, instance, during_analysis):\r\n xvalues = np.arange(self.data.shape[0])\r\n\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n\r\n \"\"\"The visualizer now outputs images of the best-fit results to hard-disk (checkout `visualizer.py`).\"\"\"\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=self.data,\r\n title=\"Data\",\r\n 
ylabel=\"Data Values\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"data\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=model_data,\r\n title=\"Model Data\",\r\n ylabel=\"Model Data Values\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"model_data\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=residual_map,\r\n title=\"Residual Map\",\r\n ylabel=\"Residuals\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"residual_map\",\r\n )\r\n\r\n plot_profile_1d(\r\n xvalues=xvalues,\r\n profile_1d=chi_squared_map,\r\n title=\"Chi-Squared Map\",\r\n ylabel=\"Chi-Squareds\",\r\n color=\"k\",\r\n output_path=paths.image_path,\r\n output_filename=\"chi_squared_map\",\r\n )", "def print_graph() -> None:\n raise NotImplementedError", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def show(image, label, weights, prediction, ax):\n global img_objects\n if len(img_objects)==0:\n for i in range(10):\n _img = ax[0, i].imshow(weights[i].reshape(28,28), cmap='gray')\n img_objects.append(_img)\n _img = ax[1, 5].imshow(image.reshape(28,28), cmap='gray')\n img_objects.append(_img)\n else:\n for i in range(10):\n img_objects[i].set_data(weights[i].reshape(28,28))\n img_objects[i].set_clim(vmin=0, vmax=np.max(weights[i]))\n img_objects[10].set_data(image.reshape(28,28))\n ax[0,5].set_title('truth: %d, predict: %d'%(np.argmax(label), prediction))", "def visualize(self):\r\n self.aggregator.plot_loss()\r\n self.save_figure()", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def print_path(self):\n\n grid = tg.Graph.grid_graph(self.graph.rows,self.graph.cols)\n #tg.draw_grid(self.draw_edges_alt,self.graph.rows,self.graph.cols,grid)\n tg.draw_grid(self.edges,self.graph.rows,self.graph.cols,grid)", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def visualize(self, A):\n G = nx.from_numpy_matrix(np.array(A))\n nx.draw(G, with_labels=True)\n plt.show()\n plt.clf()\n exit(0)", "def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()", "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n 
\tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def visualize(original, s, m, l, s_pred, m_pred, l_pred):\n\tfig = plt.figure(figsize=(20, 10))\n\tplt.subplot(1,7,1)\n\tplt.title('Original image')\n\tplt.imshow(original)\n\n\tplt.subplot(1,7,2)\n\tplt.title('S image')\n\tplt.imshow(s)\n\tplt.subplot(1,7,3)\n\tplt.title('S Pred image')\n\tplt.imshow(s_pred)\n\n\tplt.subplot(1,7,4)\n\tplt.title('M image')\n\tplt.imshow(m)\n\tplt.subplot(1,7,5)\n\tplt.title('M Pred image')\n\tplt.imshow(m_pred)\n\n\tplt.subplot(1,7,6)\n\tplt.title('L image')\n\tplt.imshow(l)\n\tplt.subplot(1,7,7)\n\tplt.title('L Pred image')\n\tplt.imshow(l_pred)", "def graphs(self, title: str, graph_name: str) -> NoReturn:\n self.add_page()\n self.set_text_color(0, 250, 154)\n self.cell(0, 0, title, align='C', ln=2)\n self.ln(self.line_height)\n img = Image.open(full_path(f'./output/{graph_name}'))\n image_width = self.w - (self.l_margin + self.r_margin)\n image_height = self.h - (self.t_margin + self.b_margin + self.get_y())\n self.image(img, self.get_x(), self.get_y(), w=image_width,\n h=image_height)", "def plot(self):\n pass", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def draw_graph(self,path):\n print(\"Drawing HashTag Graph!\")\n\n layout = self.graph.layout(\"kk\")\n\n self.graph.vs[\"label\"] = self.graph.vs[\"name\"]\n\n\n visual_style = {}\n visual_style[\"bbox\"] = (1600,1600)\n visual_style[\"vertex_size\"] = 10\n visual_style[\"edge_width\"] = 1\n visual_style[\"edge_color\"] = 'blue'\n visual_style[\"layout\"] = layout\n visual_style[\"margin\"] = 100\n\n graph_plot = plot(self.graph, **visual_style)\n graph_plot.save(fname=path)\n\n return 0", "def display(self, image):\n raise NotImplementedError()", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def draw_im(self, fname):\n im = self.get_im()\n im_agraph = nx.nx_agraph.to_agraph(im)\n im_agraph.draw(fname, prog='dot')", "def draw(self):\n nx.draw_networkx(self.rc)", "def show_image(dataset, domain, image_class, image_name):\n\timage_file = io.imread(os.path.join(\"data\", dataset, domain, \"images\", image_class, image_name))\n\tplt.imshow(image_file)\n\tplt.pause(0.001)\n\tplt.figure()", "def create_graphic(X):\n plt.close('all')\n plt.figure(figsize=(12,6))\n sns.set(style='darkgrid', palette='bright')\n for i,j in enumerate(X): \n plt.subplot(2, 3, (i+1))\n plt.text(X[j], 0, X[j], color='black')\n plt.axvline(x=X[j], linestyle='--', c='red')\n sns.distplot(data[j].dropna(), bins=30, kde=False)\n plt.tight_layout()\n img = io.BytesIO()\n plt.savefig(img, format='png')\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n graph = 'data:image/png;base64,{}'.format(graph_url)\n return graph", "def show_plot(img, title):\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.title(\"Hand Number: \" + title)\n plt.show()", "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is 
None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig", "def complete_a_picture(viz_client):\n # Get a training set for a set of x-y coordinates, this one is part of a checkerboard pattern\n x_min = 0.0; x_max = 2.0; y_min = 0; y_max = 1.0\n train_in, train_out = make_checkerboard_training_set(num_points=1000, noise=0.00, randomize=True,\n x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max)\n\n # Plot he training set\n viz_client.plot_2d_classes(train_in, train_out, title=\"Training data\",\n x_min=x_min, x_max=x_max,\n y_min=y_min, y_max=y_max, delta=0.01)\n\n training_size = train_in.shape[0]\n batch_size = round(training_size / 3.0)\n num_features = train_in.shape[1]\n\n # Initialize a numpynet object\n numpy_net = NumpyNet(num_features, batch_size,\n num_hidden=5, hidden_sizes=[4, 8, 16, 8, 4],\n activation=[\"tanh\", \"tanh\", \"tanh\", \"tanh\", \"tanh\", \"tanh\"],\n learning_rate=0.0001,\n dropout_rate=None, weight_decay=None,\n random_seed=1337)\n # Hook the object up to the viz client\n numpy_net.set_viz_client(viz_client)\n\n # A basic report of the net to the logs\n numpy_net.report_model()\n\n # Train the model!\n numpy_net.train(train_in, train_out, epochs=10000,\n visualize=True, visualize_percent=1, save_best=\"./numpynet_best_model.pickle\",\n debug_visualize=True)\n\n # A silly viz of the network architecture (if the net isn't too huge to make it muddled)\n if max(numpy_net.layer_sizes) <= 16:\n numpy_net.viz.network_svg(numpy_net)", "def base_visualization(self, filename, save_format='jpg', cfg=None):\n # TODO and detailed mode for the visualization function\n # in which the graph will also contain all the weights/bias\n # information.\n # if not cfg:\n # cfg = {}\n # graph = graphviz.Digraph(format=save_format)\n # self.visited.clear()\n # for _input in self.graph.inputs():\n # if _input.type().kind() == CLASSTYPE_KIND:\n # continue\n # self.visual_traverse(_input, graph, None, cfg)\n # graph.render(filename)\n pass", "def visualize(self):\n dot = Graph()\n \n for k, v in self.vs.items():\n if v.observed:\n dot.node(v.word, style=\"filled\")\n else:\n dot.node(v.word)\n\n for i, (k, v) in enumerate(self.fs.items()):\n dot.node(str(i), shape=\"square\", style=\"bold\")\n s, t = k[1], k[3]\n dot.edge(s, str(i))\n dot.edge(t, str(i))\n \n print dot.source\n #src.render('test-output/holy-grenade.gv', view=True)", "def vis_segmentation(image, seg_map):\n plt.figure(figsize=(20, 20))\n \n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n plt.imshow(seg_image)\n plt.axis('off')\n plt.savefig(str(image_id)+'_seg.jpg',bbox_inches='tight')\n plt.close()", "def show_picture(self, data):\n raise NotImplementedError", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def draw(self):\n draw(self.graph)", "def visualize(self, filename, options = {'showHead'}):\n\t\tVisualizer.useGraphViz(self, filename, options)", "def show_digit(self):\n x_train, _, _, _ = self._load_data()\n plt.imshow(x_train[0], cmap=plt.cm.binary)\n plt.show()", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def 
_repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()", "def show_env(self, img):\n plt.figure(1)\n plt.subplot(111)\n plt.imshow(img, interpolation=\"nearest\")\n plt.show()", "def show(raster):\n gk.read(raster).show()", "def _visualize(self, unnorm_image, class_ids, scores, bounding_boxes):\n ax = utils.viz.plot_bbox(unnorm_image,\n bounding_boxes[0],\n scores[0],\n class_ids[0],\n class_names=self._network.classes)\n fig = plt.gcf()\n fig.set_size_inches(14, 14)\n plt.show()", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def show_feat(feat_map):\n for i in range(feat_map.shape[0]):\n plt.imshow(feat_map[i])\n plt.show()", "def visualizeImg(img):\n plt.figure(figsize=(10,4))\n plt.imshow(img)\n plt.show()", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. :)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def visualize(model: Model, structural_part=True, measurement_part=False,\n view=True, filename=None, title=''):\n g = gv.Digraph(format='jpg', graph_attr={'label': title})\n if structural_part:\n g.node_attr.update(color='red', shape='box')\n for i, j in model.parameters['Beta']:\n lval, rval = model.beta_names[0][i], model.beta_names[0][j]\n g.edge(rval, lval)\n if measurement_part:\n g.node_attr.update(color='black', shape='circle')\n for i, j in model.parameters['Lambda']:\n lval, rval = model.lambda_names[0][i], model.lambda_names[0][j]\n g.edge(lval, rval)\n g.render(filename, view=view)", "def imdisplay(filename, representation):\n img = read_image(filename, representation)\n if representation == GS_REP:\n plt.imshow(img, cmap=plt.cm.gray)\n else:\n plt.imshow(img)", "def imshow(self):\n axes([0, 0, 1, 1], xticks=[], yticks=[])\n imshow(self.rgb_image())", "def draw_network(graph, filename):\n plt.figure(figsize=(12,12))\n nx.draw_networkx(graph, with_labels=False, alpha=.5, width=.1, node_size=100)\n plt.axis(\"off\")\n plt.savefig(filename, format=\"PNG\")", "def show_graphs ():\n plt.ylim = (0, 300)\n 
plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "def draw(self, img_path=None):\n fig, ax = plt.subplots(figsize=self.figsize)\n\n # Set the axis limits\n plt.xlim(self.xlim)\n plt.ylim(self.ylim)\n\n # Draw the nodes\n for node in self.nodes:\n node.add_circle(ax)\n\n # Add the transitions\n for i in range(self.M.shape[0]):\n for j in range(self.M.shape[1]):\n # self loops\n # if i == j:\n if (i == j) and (self.M[i,j] != 0):\n # Loop direction\n if self.nodes[i].y >= 0:\n self.nodes[i].add_self_loop(ax, prob = self.M[i,j], direction='up')\n else:\n self.nodes[i].add_self_loop(ax, prob = self.M[i,j], direction='down')\n # directed arrows\n elif self.M[i,j] > 0:\n self.add_arrow(ax, self.nodes[i], self.nodes[j], prob = self.M[i,j])\n\n plt.axis('off')\n # Save the image to disk?\n if img_path:\n plt.savefig(img_path)\n plt.show()", "def show_pair(data, labeled=True, ordering='channel_last'):\n xb, yb = data\n batch_size = len(xb)\n \n fig = plt.figure(figsize=(5 * 2, 5 * batch_size))\n for i in range(batch_size):\n fig.add_subplot(batch_size, 2, (2 * i) + 1) \n show_image(xb[i], ordering=ordering)\n \n fig.add_subplot(batch_size, 2, (2 * i) + 2) \n if yb[i] is None:\n y = np.zeros_like(xb[i])\n else:\n y = yb[i] \n\n if labeled:\n show_label(y, ordering=ordering)\n else:\n show_image(y, ordering=ordering)\n \n return fig", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n 
g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def getGraphFigure(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n return fig", "def visualize_graph(self):\n self._graph.GetVertexData().AddArray(self._labels)\n self._graph.GetEdgeData().AddArray(self._weights)\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(1)\n colors.SetName('Colors')\n types = int(245 / len(self._color_dict))\n for c in self._colors:\n colors.InsertNextValue(int(c * types))\n self._graph.GetVertexData().AddArray(colors)\n graphLayoutView = vtk.vtkGraphLayoutView()\n graphLayoutView.AddRepresentationFromInput(self._graph)\n graphLayoutView.SetLayoutStrategy(vtk.vtkSpanTreeLayoutStrategy())\n graphLayoutView.GetLayoutStrategy().SetEdgeWeightField(\"Weights\")\n graphLayoutView.GetLayoutStrategy().SetWeightEdges(1)\n graphLayoutView.GetRenderer().GetActiveCamera().ParallelProjectionOff()\n graphLayoutView.SetEdgeLabelArrayName(\"Weights\")\n graphLayoutView.SetEdgeLabelVisibility(1)\n graphLayoutView.SetVertexLabelArrayName('labels')\n graphLayoutView.SetVertexLabelVisibility(1)\n graphLayoutView.SetVertexColorArrayName('Colors')\n graphLayoutView.SetColorVertices(1)\n graphLayoutView.SetInteractorStyle(MouseAndKeysInteractor(graphLayoutView))\n graphLayoutView.ResetCamera()\n graphLayoutView.Render()\n graphLayoutView.GetInteractor().Start()", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def show(self, output_file=\"ast_viz.pdf\"):\n pos = radial_tree_layout(self.graph, self.graph.vertex(0))\n scale = self.graph.num_vertices()\n\n graph_draw(self.graph, vertex_text=self.graph.vp.type, # self.graph.vertex_index, #\n pos=pos, vertex_font_size=scale,\n output=output_file, output_size=(scale * 200, scale * 200))", "def plot_networks(student, shape):\n plt.figure()\n s = np.arange(np.prod(shape))\n plt.figure()\n value = student.train_model.value(s).reshape(shape)\n plt.imshow(value)\n\n pi = student.train_model.proba_step(s).T.reshape((-1,) + shape)\n x, y = np.unravel_index(s, shape)\n\n for a in range(pi.shape[0]):\n if a == UP:\n u = np.zeros_like(s)\n v = pi[a].T.ravel()\n if a == DOWN:\n u = np.zeros_like(s)\n v = -pi[a].T.ravel()\n if a == RIGHT:\n v = np.zeros_like(s)\n u = pi[a].T.ravel()\n if a == LEFT:\n v = np.zeros_like(s)\n u = -pi[a].T.ravel()\n plt.quiver(x, y, u, v)", "def network_graph(net_dict=None):\n if net_dict == None:\n net_dict = {}\n else:\n G = nx.from_dict_of_lists(net_dict)\n plt.figure(num=None, figsize=(30, 30), dpi=80, facecolor='w', edgecolor='c')\n nx.draw_networkx(G, with_labels=True, alpha=0.5, edge_color='c', cmap=plt.cm.GnBu)\n plt.savefig(\"metabolism_5years.png\", bbox_inches='tight')", "def visualize_genome(self, debug=False, genome=None, show: bool = True):\n if not genome:\n genome = self.best_genome if self.best_genome else list(self.population.values())[0]\n name = f\"genome_{genome.key}\"\n sf = get_subfolder(f\"population{'_backup' if self.use_backup else ''}/\"\n f\"storage/\"\n f\"{self.folder_name}/\"\n f\"{self}/\", 'images')\n sf = get_subfolder(sf, f'architectures{\"_debug\" if debug else \"\"}')\n draw_net(config=self.config.genome,\n genome=genome,\n debug=debug,\n filename=f'{sf}{name}',\n view=show)", "def generate(self, diagram):" ]
[ "0.71855307", "0.69043744", "0.68482786", "0.67418605", "0.6528953", "0.64814496", "0.64277744", "0.6422355", "0.6409942", "0.6409942", "0.6409942", "0.6407268", "0.6398413", "0.6386937", "0.63642865", "0.6362115", "0.6361208", "0.6333713", "0.6332453", "0.6316993", "0.62613827", "0.62551194", "0.6251797", "0.62516636", "0.6251553", "0.6250355", "0.62473595", "0.6246402", "0.6235041", "0.62313986", "0.6219455", "0.6219455", "0.6219455", "0.6213501", "0.6205085", "0.62021035", "0.62006557", "0.6191152", "0.6188563", "0.61868", "0.61849433", "0.61772114", "0.6166891", "0.61668813", "0.6162162", "0.6156786", "0.61433035", "0.6141457", "0.6141163", "0.6140301", "0.61348313", "0.61296487", "0.61134213", "0.61038303", "0.610154", "0.6093232", "0.6090266", "0.6089701", "0.60894", "0.60586226", "0.605367", "0.6052503", "0.6046697", "0.6044754", "0.6041772", "0.6036576", "0.602411", "0.6020245", "0.601923", "0.6019176", "0.6018024", "0.60080826", "0.60076654", "0.60076654", "0.6003274", "0.5995241", "0.59928083", "0.59874254", "0.5986008", "0.5982947", "0.5980214", "0.59746", "0.59745383", "0.59741205", "0.59661865", "0.5962968", "0.59462494", "0.594046", "0.59382933", "0.59378743", "0.5934122", "0.5930663", "0.5929099", "0.5926696", "0.5925619", "0.59226596", "0.5922361", "0.5919344", "0.59087646", "0.5896094", "0.58956856" ]
0.0
-1
A deserializer reads hypergraphs dumped into files back into memory. The deserializer is initialized with types of nodes and edges used to construct a hypergraph.
def __init__(self, node_class=Node, edge_class=Edge):
    self.node_class = node_class
    self.edge_class = edge_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deserialize(self, filename):\n f = open(filename)\n edges_tails = []\n nodes = []\n # first pass adds incoming edges to nodes\n for line in f:\n if '->' in line: # edge\n edge = self.edge_class()\n tail_ids, head_id = edge.deserialize(line)\n nodes[head_id].add_incoming(edge)\n edges_tails.append((edge, tail_ids))\n else: # node\n node = self.node_class()\n node.deserialize(line)\n assert node.id == len(nodes), 'nodes shall appear in order'\n nodes.append(node)\n # second pass adds tail nodes to edges\n for edge, tail_ids in edges_tails:\n for nid in tail_ids:\n edge.add_tail(nodes[nid])\n f.close()\n # make a toposorted hypergraph\n hg = Hypergraph(nodes[0])\n hg.nodes = nodes\n for node in hg:\n node.hg = hg\n for edge in hg.edges():\n edge.hg = hg\n hg.tasks_done.add('topo_sort')\n return hg", "def deserialize(data):\n graph = INode.deserialize(data)\n graph._nodes = []\n for node in data['nodes']:\n graph._nodes.append(INode.deserialize(node))\n for node in data['nodes']:\n this = [n for n in graph.nodes\n if n.identifier == node['identifier']][0]\n for name, input_ in node['inputs'].items():\n for identifier, plug in input_['connections'].items():\n upstream = [n for n in graph.nodes\n if n.identifier == identifier][0]\n upstream.outputs[plug] >> this.inputs[name]\n return graph", "def deserialize(self, data):\n if not data:\n return\n q = collections.deque(data.split(self.sep))\n res = self.dfs2(q)\n return res", "def deserialize(self, data):\n q = collections.deque(data.split(self.sep))\n \n res = self.dfs2(q)\n \n return res", "def _load_edges(self, path_to_split: str) -> NoReturn:\n print(f'Loading edges from {path_to_split}')\n self.train_edges = np.load(f'{path_to_split}/train_edges.npy',\n allow_pickle=True).item()\n self.val_edges = np.load(f'{path_to_split}/val_edges.npy',\n allow_pickle=True).item()\n self.test_edges = np.load(f'{path_to_split}/test_edges.npy',\n allow_pickle=True).item()\n self.test_edges_false = np.load(f'{path_to_split}/' +\n f'test_edges_false.npy',\n allow_pickle=True).item()\n self.val_edges_false = np.load(f'{path_to_split}/' +\n f'val_edges_false.npy',\n allow_pickle=True).item()\n self.adj_train = np.load(f'{path_to_split}/' +\n f'adj_train.npy',\n allow_pickle=True).item()", "def read(cls, inputfilename):\n\n # import json\n # with open(inputfilename, 'w') as infile:\n # data = json.load(infile)\n # g = nx.readwrite.json_graph.node_link_graph(data)\n # return cls(network = g)\n return cls(network=nx.read_gpickle(inputfilename))", "def deserialize(self, data):", "def deserialize_model(hypes):\n model_path_trained = hypes['segmenter']['serialized_model_path']\n if not os.path.isfile(model_path_trained):\n logging.warning(\"No model found at '%s'.\", model_path_trained)\n network_path = hypes['segmenter']['network_path']\n network = imp.load_source('sst.network', network_path)\n trained = network.load_model(hypes)\n return trained", "def loadgraph(self, path):\n\n raise NotImplementedError", "def deserialize(cls, path):\r\n with open(path, \"rb\") as fd:\r\n fitter = rpickle.load(fd)\r\n fitter.initializeRoadRunnerModel()\r\n return fitter", "def deserialize(cls, data):\r\n dtype = data.get('_type')\r\n if dtype == 'vertex':\r\n vertex_type = data['element_type']\r\n if vertex_type not in vertex_types:\r\n raise ElementDefinitionException('Vertex \"{}\" not defined'.format(vertex_type))\r\n translated_data = vertex_types[vertex_type].translate_db_fields(data)\r\n return vertex_types[vertex_type](**translated_data)\r\n elif dtype == 'edge':\r\n 
edge_type = data['_label']\r\n if edge_type not in edge_types:\r\n raise ElementDefinitionException('Edge \"{}\" not defined'.format(edge_type))\r\n translated_data = edge_types[edge_type].translate_db_fields(data)\r\n return edge_types[edge_type](data['_outV'], data['_inV'], **translated_data)\r\n else:\r\n raise TypeError(\"Can't deserialize '{}'\".format(dtype))", "def deserialize(self, data):\n nodes =iter(data.split(','))\n return self.deserialize_tree(nodes)", "def load_graph(self, path):\n if path.split('.')[-1]=='gexf':\n self.graph = nx.read_gexf(path)\n else:\n self.graph = nx.read_gpickle(path)", "def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)", "def read_input_from_file(f):\n f.readline()\n size = int(f.readline().split()[-1])\n nb_edges = int(f.readline().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = f.readline()\n _, u, v, w = line.split()\n\n e = g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = f.readline()\n while 'Terminals' not in line:\n line = f.readline()\n if 'SECTION' in line:\n line = f.readline()\n while 'Terminals' not in line:\n line = f.readline()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = f.readline()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)", "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "def deserialize(data, consolidator=None):\n tmp = json.loads(data)\n nodelist = []\n for n in tmp[\"nodes\"]:\n node = Node(n[\"name\"])\n node.text = n[\"text\"]\n for neighbor in n[\"neighbors\"]:\n if neighbor[0] == \"relation\":\n node.add_relation(neighbor[1], tmp[\"idmap\"][str(neighbor[2])])\n node\n elif neighbor[0] == \"literal\":\n node.add_attribute(neighbor[1], str(neighbor[2]))\n nodelist.append(node)\n return Domain(nodelist, consolidator=consolidator)", "def getDeserializer():", "def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')", "def readEdges(self, fileName, format):\n f = open(fileName)\n if format == 'simple':\n edgesRaw = f.read().split(\"\\n\")\n\n if edgesRaw[-1] == '': edgesRaw = edgesRaw[:-1]\n\n for edge in edgesRaw:\n [startVertex, endVertex] = edge.split(\"--\")\n newEdge = Edge(Vertex(int(startVertex)), Vertex(int(endVertex)))\n self.addEdge(newEdge)", "def deserialize(self, serialized_outputs: Dict[str, Any], modules: Dict[str, 'NeuralModule']):\n # Check type.\n if serialized_outputs[\"type\"] == \"default\":\n # We still need to deserialize.\n # Use-case: deserialization of a graph with nested graph with bound output.\n d = self._default_outputs\n else:\n d = self._manual_outputs\n\n # Iterate through serialized inputs one by one.\n for i in serialized_outputs[\"mappings\"]:\n # Deserialize!\n [producer, key_ntype] = 
i.split(\"->\")\n [key, ntype_str] = key_ntype.split(\" | \")\n [step_number, producer_name, producer_port_name] = producer.split(\".\")\n # Get neural type from module output port definition.\n ntype = modules[producer_name].output_ports[producer_port_name]\n\n # Make sure the graph bound port type matches the deserialized type.\n assert ntype_str == str(ntype)\n\n # Create a new input.\n go = GraphOutput(ntype, StepModulePort(int(step_number), producer_name, producer_port_name))\n d[key] = go\n\n # Done.", "def from_dict(cls, dikt) -> 'Neo4jGraphData':\n return util.deserialize_model(dikt, cls)", "def build_graph(self):\n edge_data_by_type, all_edges, all_nodes = self.load_training_data(\n self.train_edges_file,\n slf_loop=self.config['slf_loop'],\n symmetry_edge=self.config['symmetry_edge'])\n\n num_nodes = len(all_nodes)\n node_features = {\n 'index': np.array(\n [i for i in range(num_nodes)], dtype=np.int64).reshape(-1, 1)\n }\n\n self.graph = heter_graph.HeterGraph(\n num_nodes=num_nodes,\n edges=edge_data_by_type,\n node_types=None,\n node_feat=node_features)\n\n self.edge_types = sorted(self.graph.edge_types_info())\n logging.info('total %d nodes are loaded' % (self.graph.num_nodes))", "def graph_reader(path):\n graph = nx.from_edgelist(pd.read_csv(path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def parse( self, data, baseURL, *args, **named ):\n sg = basenodes.sceneGraph(\n )\n \n # these three are shared among all shapes\n hash = md5( baseURL ).hexdigest()\n coord = basenodes.Coordinate( DEF='Coord-%s'%(hash,) )\n normal = basenodes.Normal(DEF='Norm-%s'%(hash,))\n texCoord = basenodes.TextureCoordinate(DEF='TexCoord-%s'%(hash,))\n\n mesh = None # transforms\n group = None # shape\n material = None # appearance, material, texture\n \n materials = {}\n \n # indices are 1-based, the first values are never used...\n vertices = [[0., 0., 0.]] \n normals = [[0., 0., 0.]]\n tex_coords = [[0., 0.]]\n \n current_vertex_indices = []\n current_normal_indices = []\n current_texcoord_indices = []\n\n for line in data.splitlines():\n if line.startswith('#'): \n continue\n values = line.split()\n if not values: \n continue\n\n if values[0] == 'v':\n vertices.append(map(float, values[1:4]))\n elif values[0] == 'vn':\n normals.append(map(float, values[1:4]))\n elif values[0] == 'vt':\n tex_coords.append(map(float, values[1:3]))\n elif values[0] == 'mtllib':\n self.load_material_library(values[1], materials, baseURL)\n elif values[0] in ('usemtl', 'usemat'):\n material = materials.get(values[1], None)\n if material is None:\n log.warn('Unknown material: %s', values[1])\n material = self.defaultMaterial()\n if mesh is not None:\n if group and current_vertex_indices:\n group.geometry.coordIndex = current_vertex_indices\n group.geometry.texCoordIndex = current_texcoord_indices\n group.geometry.normalIndex = current_normal_indices\n current_vertex_indices = []\n current_texcoord_indices = []\n current_normal_indices = []\n group = basenodes.Shape(\n geometry = basenodes.IndexedFaceSet(\n coord = coord,\n normal = normal,\n texCoord = texCoord,\n solid=False,\n ),\n appearance = material,\n )\n mesh.children.append(group)\n elif values[0] == 'o':\n mesh = basenodes.Transform( DEF = values[1] )\n sg.children.append( mesh )\n sg.regDefName( values[1], mesh )\n # previous shape is no longer current...\n group = None\n elif values[0] == 's':\n # a smoothing-group definition...\n # not currently supported...\n pass\n elif values[0] == 'f':\n # adds a single face\n 
if mesh is None:\n # anonymous transform\n mesh = basenodes.Transform()\n sg.children.append(mesh)\n if material is None:\n material = self.defaultMaterial()\n if group is None:\n group = basenodes.Shape( \n geometry = basenodes.IndexedFaceSet(\n coord = coord,\n normal = normal,\n texCoord = texCoord,\n solid=False,\n ),\n appearance = material,\n )\n mesh.children.append(group)\n\n for i, v in enumerate(values[1:]):\n v_index, t_index, n_index = self._cleanIndex( v )\n current_vertex_indices.append( v_index )\n current_texcoord_indices.append( t_index )\n current_normal_indices.append( n_index )\n current_vertex_indices.append( -1 )\n current_texcoord_indices.append( -1 )\n current_normal_indices.append( -1 )\n else:\n log.warn( \"\"\"Unrecognized operation: %r\"\"\", values )\n if group and current_vertex_indices:\n group.geometry.coordIndex = current_vertex_indices\n group.geometry.texCoordIndex = current_texcoord_indices\n group.geometry.normalIndex = current_normal_indices\n coord.point = vertices\n normal.normal = normals\n texCoord.texCoord = tex_coords\n return True,sg\n \n \n # this creates a pointset-only version of the geometry...", "def load_graph(f: IO[str], graph_class=Graph) -> list[Graph]:\n return read_graph_list(graph_class, f)", "def deserialize(self, data):\n preorder = map(int, data.split())\n inorder = sorted(preorder)\n return self.buildTree(preorder, inorder)", "def deserialize_from_deconstructed(cls, type_cls, args, kwargs):\n raise NotImplementedError", "def deserializer():\n return bytes.decode", "def deserialize(self, data):\n vals = [val for val in data.split()]\n queue = collections.deque(vals)\n if not queue:\n return\n root = Node(int(queue.popleft()), [])\n\n def helper(node):\n if not queue:\n return\n while queue[0] != '#':\n child = Node(int(queue.popleft()), [])\n node.children.append(child)\n helper(child)\n queue.popleft()\n\n helper(root)\n return root", "def from_dict(cls, dikt) -> 'Edge':\n return util.deserialize_model(dikt, cls)", "def deserialize(path):\n with open(path, 'rb') as f:\n temp = pickle.load(f)\n for q in temp.questions:\n q.on_deserialize()\n return temp", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def _load_data_graph(self):\n # input\n with tf.variable_scope(\"train_test\", reuse=True):\n # review input - Both original and reversed\n self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name=\"input%i\" % t)\n for t in range(self.seq_length)]\n self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name=\"input%i\" % t)\n for t in range(self.seq_length)]\n # desired output\n self.labels = [tf.placeholder(tf.int32, shape=(None,), name=\"labels%i\" % t)\n for t in range(self.seq_length)]\n # weight of the hidden layer\n self.weights = [tf.ones_like(labels_t, dtype=tf.float32)\n for labels_t in self.labels]\n\n # Decoder input: prepend some \"GO\" token and drop the final\n # token of the encoder input\n self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name=\"GO\")] + self.labels[:-1])", "def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph", "def deserialize(self, data):\n # root = TreeNode(data[0])\n # self.helper(data)\n if not data: return []\n return self.helper(data)", "def _deserialize(self):\n try:\n self._as_dict = yaml.load(self.path)\n except ScannerError as e:\n raise exc.ContentSerializeError(self, self.path, e.problem)", "def load(self):\n # Check whether Unpickler was 
initialized correctly. This is\n # only needed to mimic the behavior of _pickle.Unpickler.dump().\n if not hasattr(self, \"_file_read\"):\n raise UnpicklingError(\n \"Unpickler.__init__() was not called by \"\n \"%s.__init__()\" % (self.__class__.__name__,)\n )\n self._unframer = _Unframer(self._file_read, self._file_readline)\n self.read = self._unframer.read\n self.readinto = self._unframer.readinto\n self.readline = self._unframer.readline\n self.metastack = []\n self.stack = []\n self.append = self.stack.append\n self.proto = 0\n read = self.read\n dispatch = self.dispatch\n try:\n while True:\n key = read(1)\n if not key:\n raise EOFError\n assert isinstance(key, (bytes, bytearray))\n dispatch[key[0]](self)\n print(\"STK\", bytes([key[0]]), self.stack)\n except _Stop as stopinst:\n return stopinst.value", "def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')", "def _load_from_path(self):\n logging.info(\"Loading graph data from %r\", self.path)\n self._load_vconstraints_from_path(self.vertices_constraints_path)\n self._load_vertices_from_path(self.vertices_path)\n self._load_edges_from_path(self.edges_path)\n logging.info(\"Completed %r graph import\", self.path)", "def read_graph(settings):\n if settings.edgelist_input:\n graph = nx.read_edgelist(settings.input)\n else:\n edge_list = pd.read_csv(settings.input).values.tolist()\n graph = nx.from_edgelist(edge_list)\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def read_graph(filename):\n G = Hypergraph()\n\n f = open(filename, 'r', encoding='utf8')\n lines = f.readlines()\n if args.weighted:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n weight = line[1]\n G.add_edge(edge_name, line[2:], float(weight))\n else:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n G.add_edge(edge_name, line[1:])\n f.close()\n return G", "def _post_deserialize (self):\n pass", "def FromFile(cls, path: pathlib.Path, ir_id: int):\n with open(path, \"rb\") as f:\n graph_tuple = pickle.load(f)\n\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)", "def load_graph(graph_path):\n graph = nx.from_edgelist(pd.read_csv(graph_path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def deserialize_object(d):\n pass", "def deserialize(self):\n with open(os.path.join(self.root_path, self._data_file), 'r') as file:\n data = json.load(file)\n for key, val in data.items():\n self.__dict__[key] = val", "def __init__(self, file_path=None, file_type='htk', header=None,\n nframes=None):\n self.header = header\n self.nframes = nframes\n self.nodes = []\n self.arcs = []\n self.start = None\n self.end = None\n if file_path:\n if file_type == 'htk':\n self.htk2dag(file_path)\n elif file_type == 'kaldi':\n self.kaldi2dag(file_path)\n else:\n raise ValueError('file_type must be either htk or kaldi')", "def deserialize(self, data):\n def build():\n val = array.next()\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = build()\n node.right = build()\n return node\n array = iter(data.split())\n return build()", "def load(self, path):\n\n # Extract files to temporary directory and load content\n with TemporaryDirectory() as directory:\n # Unpack files\n archive = ArchiveFactory.create(directory)\n archive.load(path, \"tar\")\n\n # Load graph backend\n self.loadgraph(f\"{directory}/graph\")\n\n # Load categories, if necessary\n path = f\"{directory}/categories\"\n if os.path.exists(path):\n with open(path, \"rb\") as handle:\n self.categories = 
pickle.load(handle)\n\n # Load topics, if necessary\n path = f\"{directory}/topics\"\n if os.path.exists(path):\n with open(path, \"rb\") as handle:\n self.topics = pickle.load(handle)", "def load_graph(f: IO[str], graph_class=Graph, read_list: bool = False) -> Union[Tuple[List[Graph], List[str]], Graph]:\n if read_list:\n graph_list, options = read_graph_list(graph_class, f)\n return graph_list, options\n else:\n graph, options, tmp = read_graph(graph_class, f)\n return graph # ,options", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numnod = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.nodes = []\n\n for _ in range(self.numnod):\n node = FRDNode()\n self.nodes.append(node)\n if self.format < 2:\n in_file.read(1)\n node.key = int(in_file.read(2))\n node.number = int(in_file.read(5*(self.format+1)))\n node.pos = [float(in_file.read(12)) for j in range(3)]\n in_file.read(1) # eol\n else:\n node.number = struct.unpack('i', in_file.read(4))[0]\n if self.format == 2:\n node.pos = struct.unpack('fff', in_file.read(12))\n else:\n node.pos = struct.unpack('ddd', in_file.read(24))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def testGraphExtract(self):\n graph = Graph2()\n graph.parseFile(TESTFILE)", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def ReadGraph(inputFileName):\n inputFile = open(inputFileName)\n jsonGraphArray = json.load(inputFile)\n graph = Graph.Graph()\n graph.load_from_json(jsonGraphArray)\n inputFile.close()\n return graph", "def parse (cls, raw_data):\n # Parse text\n model = NFFGModel.parse(raw_data)\n # Create new NFFG\n nffg = NFFG(id=model.id, name=model.name, service_id=model.service_id,\n version=model.version, mode=model.mode, metadata=model.metadata)\n # Load Infras\n for infra in model.node_infras:\n nffg.add_node(infra)\n # Load SAPs\n for sap in model.node_saps:\n nffg.add_node(sap)\n # Load NFs\n for nf in model.node_nfs:\n nffg.add_node(nf)\n # Load Links\n for link in model.edge_links:\n if link.src.node.type == NFFG.TYPE_NF or \\\n link.dst.node.type == NFFG.TYPE_NF:\n link.type = str(NFFG.TYPE_LINK_DYNAMIC)\n nffg.add_edge(link.src.node, link.dst.node, link)\n # Load SG next hops\n for hop in model.edge_sg_nexthops:\n nffg.add_edge(hop.src.node, hop.dst.node, hop)\n # Load Requirements\n for req in model.edge_reqs:\n nffg.add_edge(req.src.node, req.dst.node, req)\n return nffg", "def _deserialize(self, handle):\n raise NotImplementedError", "def deserialize(self, data):\n def rdeserialize(l):\n \"\"\" a recursive helper function for deserialization.\"\"\"\n if l[0] == 'None':\n l.pop(0)\n return None\n \n root = Node(l[0])\n l.pop(0)\n root.left = rdeserialize(l)\n root.right = rdeserialize(l)\n return root\n\n data_list = data.split(',')\n root = rdeserialize(data_list)\n return root", "def __init__(self, filename, load=False):\n print(\"Wikipedia Parser initialized\")\n if not load:\n print(\"Data not persisted\")\n self._edges, self._timestamps = self._read_data(filename)\n self._median_timestamp = self.get_median_timestamp()\n self._graph1, self._graph2 = self._build_graphs()\n with open(self.PIK_NAME, 
\"wb\") as f:\n data = (self._graph1, self._graph2, self._gt_edges)\n pickle.dump(data, f)\n else:\n with open(self.PIK_NAME, \"rb\") as f:\n self._graph1, self._graph2, self._gt_edges = pickle.load(f)", "def deserialize_topology(serialized_topology):\n topology_dict = json.loads(serialized_topology)\n atoms = pandas.read_json(topology_dict['atoms'], orient='records')\n bonds = np.array(topology_dict['bonds'])\n topology = mdtraj.Topology.from_dataframe(atoms, bonds)\n return topology", "def deserialize(self, data):\n array = data.split(\",\")\n def dfs(array):\n if len(array) == 0: return None\n first = array.pop(0)\n if first == \"null\": return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\n return dfs(array)", "def deserialize(self):\n with open(self.path+self.name, \"rb\") as pfile:\n dataSet = pickle.load(pfile)\n return dataSet", "def deserialize(self, data):\n vals = iter(data.split(\",\"))\n\n def to_node():\n c = vals.next()\n if c == '#':\n return None\n else:\n node = TreeNode(int(c))\n node.left = to_node()\n node.right = to_node()\n return node\n\n return to_node()", "def deserialize(self, data):\n return NotImplementedError", "def deserialize(self, blob):\n pass", "def read_file(self) -> None:\n if not os.path.exists(self.location) or not os.path.isfile(\n self.location\n ):\n raise FileNotFoundError(self.location)\n\n self.graph = rdflib.Graph()\n try:\n if self.file_type is None:\n self.graph.parse(\n self.location, format=self.location.split(\".\")[-1]\n )\n else:\n self.graph.parse(self.location, format=self.file_type)\n except Exception:\n self.graph.parse(self.location)\n\n for (s, p, o) in self.graph:\n if p not in self.label_predicates:\n s_v = Vertex(str(s))\n o_v = Vertex(str(o))\n p_v = Vertex(str(p), predicate=True, vprev=s_v, vnext=o_v)\n self.add_vertex(s_v)\n self.add_vertex(p_v)\n self.add_vertex(o_v)\n self.add_edge(s_v, p_v)\n self.add_edge(p_v, o_v)", "def __init__(self, path):\n with open(path, 'r') as bt:\n self.headers = bt.readline().split(',')\n self.data = []\n for line in bt:\n self.data.append(list(eval(line)))\n self.scores = []\n self.models = {'dtr': DecisionTreeRegressor(),\n 'br': BaggingRegressor(n_jobs=-1),\n 'rfr': RandomForestRegressor(n_jobs=-1),\n }", "def __init__(self, path):\n self.graph = tf.Graph()\n\n graph_def = None\n tf.reset_default_graph()\n with tf.gfile.GFile(path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n if graph_def is None:\n raise RuntimeError('Cannot find inference graph in tar archive.')\n\n with self.graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n self.sess = tf.Session(graph=self.graph)", "def deserialize(self, data):\n def deserializeHelper(minVal, maxVal, values):\n if not values:\n return None\n if minVal < values[0] < maxVal:\n value = values[0]\n del values[0]\n node = self.Node(value)\n node.left = deserializeHelper(minVal, value, values)\n node.right = deserializeHelper(value, maxVal, values)\n return node\n return None\n values = [int(val) for val in data.split()]\n return deserializeHelper(float('-inf'), float('inf'), values)", "def load(self,fname, verbose=True, **kwargs):\n delimiter = kwargs[\"delimiter\"] if \"delimiter\" in kwargs.keys() else \" \"\n \n data = np.genfromtxt(fname, delimiter=delimiter, dtype=int, unpack=False)\n source, target = data[:,0], data[:,1]\n if data.shape[1] > 2:\n flux = data[:,2]\n else:\n flux = np.ones_like(source)\n nodes = set(source) | set(target)\n self.nodes 
= len(nodes)\n lines = len(flux)\n if set(range(self.nodes)) != nodes:\n new_node_ID = {old:new for new,old in enumerate(nodes)}\n map_new_node_ID = np.vectorize(new_node_ID.__getitem__)\n source = map_new_node_ID(source)\n target = map_new_node_ID(target)\n if verbose:\n print \"\\nThe node IDs have to run continuously from 0 to Number_of_nodes-1.\"\n print \"Node IDs have been changed according to the requirement.\\n-----------------------------------\\n\"\n \n \n print 'Lines: ',lines , ', Nodes: ', self.nodes\n print '-----------------------------------\\nData Structure:\\n\\nsource, target, weight \\n'\n for ii in range(7): \n print \"%i, %i, %1.2e\" %(source[ii], target[ii], flux[ii])\n print '-----------------------------------\\n'\n \n \n G = DiGraph() # Empty, directed Graph\n G.add_nodes_from(range(self.nodes))\n for ii in xrange(lines):\n u, v, w = int(source[ii]), int(target[ii]), float(flux[ii])\n if u != v: # ignore self loops\n assert not G.has_edge(u,v), \"Edge appeared twice - not supported\" \n G.add_edge(u,v,weight=w)\n else:\n if verbose:\n print \"ignore self loop at node\", u\n \n symmetric = True\n for s,t,w in G.edges(data=True):\n w1 = G[s][t][\"weight\"]\n try:\n w2 = G[t][s][\"weight\"]\n except KeyError:\n symmetric = False\n G.add_edge(t,s,weight=w1)\n w2 = w1\n if w1 != w2:\n symmetric = False\n G[s][t][\"weight\"] += G[t][s][\"weight\"]\n G[s][t][\"weight\"] /= 2\n G[t][s][\"weight\"] = G[s][t][\"weight\"]\n if verbose:\n if not symmetric:\n print \"The network has been symmetricised.\"\n \n \n ccs = strongly_connected_component_subgraphs(G)\n ccs = sorted(ccs, key=len, reverse=True)\n \n G_GSCC = ccs[0]\n if G_GSCC.number_of_nodes() != G.number_of_nodes():\n G = G_GSCC\n if verbose:\n print \"\\n--------------------------------------------------------------------------\"\n print \"The network has been restricted to the giant strongly connected component.\"\n self.nodes = G.number_of_nodes()\n \n \n \n \n for u, v, data in G.edges(data=True):\n weight = G.out_degree(u,weight='weight')\n data['transition_rate'] = 1.*data['weight']/weight\n \n \n for u, v, data in G.edges(data=True):\n data['effective_distance'] = 1. - log(data['transition_rate'])\n \n if verbose:\n print \"\\n--------------------------------------------------------------------------\"\n print \"\\nnode ID, out-weight, normalized out-weight, sum of effective distances \\n \"\n for ii in range(7):\n out_edges = G.out_edges(ii, data=True)\n out_weight, effective_distance, transition_rate = 0, 0, 0\n for u, v, data in out_edges:\n out_weight += data[\"weight\"]\n effective_distance += data[\"effective_distance\"]\n transition_rate += data[\"transition_rate\"]\n print \" %i %1.2e %2.3f %1.2e \" %(ii,out_weight, transition_rate, effective_distance)\n print \"\\n ... 
graph is saved in self.graph\"\n return G", "def read_graph_g2o(filename):\n Edge = namedtuple(\n 'Edge', ['Type', 'fromNode', 'toNode', 'measurement', 'information'])\n edges = []\n nodes = {}\n with open(filename, 'r') as file:\n for line in file:\n data = line.split()\n\n if data[0] == 'VERTEX_SE2':\n nodeId = int(data[1])\n pose = np.array(data[2:5], dtype=np.float32)\n nodes[nodeId] = pose\n\n elif data[0] == 'VERTEX_XY':\n nodeId = int(data[1])\n loc = np.array(data[2:4], dtype=np.float32)\n nodes[nodeId] = loc\n\n elif data[0] == 'EDGE_SE2':\n Type = 'P'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:6], dtype=np.float32)\n uppertri = np.array(data[6:12], dtype=np.float32)\n information = np.array(\n [[uppertri[0], uppertri[1], uppertri[2]],\n [uppertri[1], uppertri[3], uppertri[4]],\n [uppertri[2], uppertri[4], uppertri[5]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n elif data[0] == 'EDGE_SE2_XY':\n Type = 'L'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:5], dtype=np.float32)\n uppertri = np.array(data[5:8], dtype=np.float32)\n information = np.array([[uppertri[0], uppertri[1]],\n [uppertri[1], uppertri[2]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n else:\n print('VERTEX/EDGE type not defined')\n\n # compute state vector and lookup table\n lut = {}\n x = []\n offset = 0\n for nodeId in nodes:\n lut.update({nodeId: offset})\n offset = offset + len(nodes[nodeId])\n x.append(nodes[nodeId])\n x = np.concatenate(x, axis=0)\n\n # collect nodes, edges and lookup in graph structure\n graph = Graph(x, nodes, edges, lut)\n print('Loaded graph with {} nodes and {} edges'.format(\n len(graph.nodes), len(graph.edges)))\n\n return graph", "def deserialize(self, data, hashmap={}):\n raise NotImplemented()", "def deserialize(self, data):\n \n def recursiveDeserialize(stringList):\n if stringList[0] == 'None':\n stringList.pop(0)\n return None\n \n root = TreeNode(stringList[0])\n stringList.pop(0)\n root.left = recursiveDeserialize(stringList)\n root.right = recursiveDeserialize(stringList)\n \n return root\n \n return recursiveDeserialize(data.split(','))\n \n # NOTE: this is Level order BFS so doesn't work\n # since we serialized data using preorder traversal", "def load_graph(graph_url):\n graph_file = urllib2.urlopen(graph_url)\n graph_text = graph_file.read()\n graph_lines = graph_text.split('\\n')\n graph_lines = graph_lines[ : -1]\n \n edges = 0 # added count the number of edges\n print \"Loaded graph with\", len(graph_lines), \"nodes\" #1239\n \n answer_graph = {}\n for line in graph_lines:\n neighbors = line.split(' ')\n node = int(neighbors[0])\n answer_graph[node] = set([])\n for neighbor in neighbors[1 : -1]:\n answer_graph[node].add(int(neighbor))\n edges += len(answer_graph[node])\n print \"The number of edges are\", edges/2 #3047\n return answer_graph", "def deserialize(self, data):\n print data\n nodes = []\n for item in data[1:-1].split(\",\"):\n if item != \"null\":\n nodes.append(TreeNode(int(item)))\n else:\n nodes.append(None)\n q = deque()\n q.append(nodes[0])\n index = 0\n while len(q) > 0:\n cur = q.popleft()\n if cur:\n cur.left = nodes[index + 1]\n cur.right = nodes[index + 2]\n q.append(cur.left)\n q.append(cur.right)\n index += 2\n return nodes[0]", "def deserialize(self, data):\n if not data:\n return None\n ls = data.split('~')\n tree = []\n for item in ls:\n if item == 'None':\n tree.append(None)\n 
else:\n tree.append(int(item))\n\n def generator(ls):\n if not ls:\n return\n r = TreeNode(ls[0])\n q = collections.deque([r])\n tree_len = len(ls)\n cnt = 1\n while cnt < tree_len:\n if not q:\n break\n node = q.popleft()\n if node:\n node.left = TreeNode(ls[cnt]) if ls[cnt] is not None else None\n q.append(node.left)\n if cnt + 1 < tree_len:\n node.right = TreeNode(ls[cnt + 1]) if ls[cnt + 1] is not None else None\n q.append(node.right)\n cnt += 1\n cnt += 1\n return r\n ans = generator(tree)\n return ans", "def read_edgelist(\n path,\n comments=\"#\",\n delimiter=None,\n create_using=None,\n nodetype=None,\n encoding=\"utf-8\",\n):\n with open(path, \"rb\") as file:\n lines = (\n line if isinstance(line, str) else line.decode(encoding) for line in file\n )\n return parse_edgelist(\n lines,\n comments=comments,\n delimiter=delimiter,\n create_using=create_using,\n nodetype=nodetype,\n )", "def deserialize(self, data):\n nodes = data.strip(\"[\").strip(\"]\").split(\",\")\n header = nodes.pop(0)\n if header == \"null\":\n return None\n\n root = TreeNode(int(header))\n stack = [root]\n while nodes and stack:\n node = stack.pop(0)\n if node and nodes:\n n = nodes.pop(0)\n node.left = TreeNode(int(n)) if n != \"null\" else None\n stack.append(node.left)\n if node and nodes:\n n = nodes.pop(0)\n node.right = TreeNode(int(n)) if n != \"null\" else None\n stack.append(node.right)\n return root", "def deserialize(self, data, schema, **kwargs):\n return self.serializer.load(data, schema, **kwargs)", "def parser():\n global module_num_nodes\n nodes_dic = {}\n tl = {}\n br = {}\n \n def _square_size(module_num_nodes):\n \"\"\"Length of the smallest integer sized square with area of at least module_num_nodes.\n\n (size - 1) * (size - 1) < module_num_nodes <= size * size\"\"\"\n return int(math.ceil(math.sqrt(module_num_nodes)))\n\n def _add_edge(src, dst):\n \"\"\"Add an undirected edge.\"\"\"\n module_nodes[src].weights[dst] += 1\n module_nodes[dst].weights[src] += 1\n\n f = open(sys.argv[1].replace('.tr', '.graph'))\n line = f.readline()\n\n module_name = ''\n while line != '':\n # head of a module\n if line.startswith('module:'):\n if module_name != '': # save previous module's info\n nodes_dic[module_name] = module_nodes\n tl[module_name] = Point(0, 0)\n br[module_name] = Point(module_width - 1, module_height - 1)\n \n tokens = line.split(' ')\n module_name = tokens[1]\n module_num_nodes = int(f.readline())\n module_nodes = []\n\n if len(sys.argv) == 4: # if width and height are specified from command line\n module_width = int(sys.argv[5])\n module_height = int(sys.argv[6])\n assert module_width * module_height >= module_num_nodes\n else:\n square_size = _square_size(module_num_nodes)\n module_width, module_height = square_size, square_size\n\n for i in range(module_num_nodes):\n n = Node(i, module_num_nodes * [0])\n module_nodes.append(n)\n\n # body of a module\n else:\n tokens = line.split(' ') # expected format of each line is \"src dst\"\n assert len(tokens) == 2\n src, dst = int(tokens[0]), int(tokens[1])\n _add_edge(src, dst)\n\n line = f.readline() \n\n nodes_dic[module_name] = module_nodes # save last module's info\n tl[module_name] = Point(0, 0)\n br[module_name] = Point(module_width - 1, module_height - 1)\n\n return nodes_dic, tl, br", "def load_inferred(inference_path, extractors, whitelist):\n with gzip.GzipFile(inference_path) as f:\n # with open(inference_path) as f:\n with nlj.open(f, json_lib='ujson') as src:\n for row in src:\n if whitelist is not None and row['mmsi'] not in 
whitelist:\n continue\n # Parsing dates is expensive and all extractors use dates, so parse them\n # once up front\n row['start_time'] = _parse(row['start_time'])\n #dateutil.parser.parse(row['start_time'])\n for ext in extractors:\n ext.extract(row)\n for ext in extractors:\n ext.finalize()", "def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g", "def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def _deserialize(cls, *args):\n return cls(*args)", "def load(path: str):\n\n lprint(f\"Loading feature generator from: {path}\")\n with open(join(path, \"feature_generation.json\"), \"r\") as json_file:\n frozen = json_file.read()\n\n thawed = jsonpickle.decode(frozen)\n\n thawed._load_internals(path)\n\n return thawed", "def deserialize(self, data):\n data = data.split(\",\")\n # print(data)\n self.idx = 0\n \n def dfs():\n if data[self.idx] == 'N':\n self.idx += 1\n return None\n node = TreeNode(int(data[self.idx]))\n self.idx += 1\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()", "def iterdecode(self,\n lines: Union[Iterable[str], str]) -> Iterator[Graph]:\n for tree in self.iterparse(lines):\n yield layout.interpret(tree, self.model)", "def load_data():\n\n if global_deserializer is None:\n raise SystemExit('global de-serializer was not set')\n\n return global_deserializer(input())", "def load_graph(self,dataset):\n dataset = cd.build_dataset_from_name(dataset)\n self.data = dataset[0]\n G = nx.Graph()\n G.add_edges_from(self.data.edge_index.t().tolist())\n return G", "def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def get_deserializer(name):\n\n loads = importlib.import_module(name).loads\n\n # Serializers that handle unicode streams and a are safe against comments\n # can be used directly\n if name == 'json':\n import json\n return json.loads\n\n def deserializer(x):\n # Load base85 bytes data\n x = x[1:].encode('ascii')\n x = base64.b85decode(x)\n try:\n return loads(x)\n except Exception as ex:\n raise SerializationError(ex)\n\n return deserializer", "def __init__(self, path, input_type='f'):\n if input_type == 'f':\n file = open(path, 'r')\n elif input_type == 's':\n file = path\n else:\n raise exceptions.BadInputError(f\"invalid input type {input_type}\")\n\n pdl = yaml.safe_load(file)\n\n self.type_checks = {\n 'typedef': self.validate_typedef,\n 'component': self.validate_component,\n 'graph': self.validate_graph,\n }\n\n self.imports = []\n if 'import' in pdl:\n self.imports = pdl['import']\n\n self.namespace = pdl['name']\n self.body = pdl['body']\n self.typedefs = {}\n self.components = []\n self.graphs = []\n self.validate()", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numelem = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.elems = []\n\n for _ in range(self.numelem):\n elem = FRDElem()\n self.elems.append(elem)\n if self.format < 2:\n in_file.read(1)\n elem.key = int(in_file.read(2))\n elem.number = int(in_file.read(5*(self.format+1)))\n elem.type = int(in_file.read(5))\n elem.group = int(in_file.read(5))\n elem.material = 
int(in_file.read(5))\n in_file.read(1) # eol\n elem.nodes = []\n num_nodes = FRDElem.nodesPerType[elem.type]\n num_lines = int(num_nodes/(5*(3-self.format)+1))+1\n for j in range(num_lines):\n in_file.read(3) # pad byte and key = -2\n k_start = j*5*(3-self.format)\n k_end = min(num_nodes - k_start, (j+1)*5*(3-self.format))\n for _ in range(0, k_end):\n elem.nodes.append(\n int(in_file.read(5*(self.format+1))))\n in_file.read(1) # eol\n else:\n elem.number = struct.unpack('i', in_file.read(4))[0]\n elem.type = struct.unpack('i', in_file.read(4))[0]\n num_nodes = FRDElem.nodesPerType[elem.type]\n elem.group = struct.unpack('i', in_file.read(4))[0]\n elem.material = struct.unpack('i', in_file.read(4))[0]\n elem.nodes = struct.unpack(\n 'i'*num_nodes, in_file.read(num_nodes*4))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def input_graph(graph_class=Graph) -> list[Graph]:\n return load_graph(f=sys.stdin, graph_class=graph_class)", "def parse_triangle_files(self):\n nodes = {}\n boundary_nodes = []\n\n # parse node file into nodes\n with open(self.files['node']) as node_file:\n header = True\n for line in node_file:\n if header:\n header = False\n continue\n content = list(filter(lambda a: bool(a), line.split(' '))) # pylint: disable=W0108\n if not '#' in content[0]:\n is_boundary = content[3] == '1\\n'\n nodes[int(content[0])] = {\n 'id': int(content[0]),\n 'coords': [int(content[1]), int(content[2])],\n 'distance': 0 if is_boundary else None,\n 'relations': [],\n 'level_cycles': [], # ids of any level cycles this node is a part of\n 'level_paths': [], # ids of any level paths this node is a part of\n 'is_root_element': False,\n 'betweener_paths': []\n }\n if is_boundary:\n boundary_nodes.append(int(content[0]))\n node_file.close()\n\n # parse edge files into node relations\n with open(self.files['edge']) as edge_file:\n header = True\n for line in edge_file:\n if header:\n header = False\n continue\n content = list(filter(bool, line.split(' ')))\n if not '#' in content[0]:\n nodes[int(content[1])]['relations'].append(int(content[2]))\n nodes[int(content[2])]['relations'].append(int(content[1]))\n edge_file.close()\n\n # with open(self.files['ele']) as ele_file:\n # header = True\n # for line in edge_file:\n # if header:\n # header = False\n # continue\n # content = list(filter(bool, line.split(' ')))\n # if not '#' in content[0]:\n # nodes[int(content[1])]['relations'].append(int(content[2]))\n # nodes[int(content[2])]['relations'].append(int(content[1]))\n # edge_file.close()\n\n # sorts relations clockwise\n for node_id, node in nodes.items():\n nodes[node_id]['relations'] = sorted(node['relations'], key=(\n lambda related_node_id: (\n self.calculate_clockwise_angle_and_distance(node, nodes.get(related_node_id)) # pylint: disable=W0640\n )\n ))\n\n levels = self.get_levels(nodes, boundary_nodes)\n\n for level in levels:\n for node_id in level['node_ids']:\n self.identify_special_nodes(nodes, node_id)\n\n return nodes, boundary_nodes, levels", "def input_graph(graph_class=Graph, read_list: bool = False) -> Union[Tuple[List[Graph], List[str]], Graph]:\n return load_graph(f=sys.stdin, graph_class=graph_class, read_list=read_list)", "def __init__(self, graph_yaml):\n\n self.sources = []\n self.sinks = []\n\n # Parse graph YAML\n cells, convs = parse_graph(graph_yaml)\n\n # Create dictionary of cells\n cells_dict = {}\n for cell in cells:\n if cell.name in cells_dict:\n # FIXME print out both the cells\n raise ValueError(\"Duplicate name %s\" % (name))\n\n 
cells_dict[cell.name] = cell\n if cell.type == \"source\":\n cell.ops[\"INSTANTIATE\"] = 0\n self.sources.append(cell)\n if cell.type == \"sink\":\n cell.ops[\"TERMINATE\"] = 0\n self.sinks.append(cell)\n\n # Create graph\n for conv in convs:\n if conv.input not in cells_dict:\n raise ValueError(\"No cell with name %s\" % (conv.input))\n cell = cells_dict[conv.input]\n cell.output_convs.append(conv)\n conv.input = cell\n\n if conv.output not in cells_dict:\n raise ValueError(\"No cell with name %s\" % (conv.output))\n cell = cells_dict[conv.output]\n cell.input_convs.append(conv)\n conv.output = cell", "def from_dict(cls, dikt) -> 'DataFileFormat':\n return util.deserialize_model(dikt, cls)", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HFlatten2, self).__init__(name='HFlatten2', num_nodes=117, edges=[])\n \n # Add the edges\n self.add_edges([(5, 66), (66, 50), (5, 67), (67, 51), (5, 35), (35, 20), (5, 36), (36, 21), (11, 68), (68, 52), (6, 37), (37, 22), (7, 38), (38, 23), (8, 39), (39, 24), (12, 69), (69, 53), (15, 40), (40, 25), (16, 41), (41, 26), (17, 42), (42, 27), (0, 70), (70, 54), (0, 43), (43, 28), (3, 71), (71, 55), (3, 72), (72, 56), (3, 44), (44, 29), (4, 73), (73, 57), (4, 74), (74, 58), (4, 45), (45, 30), (9, 75), (75, 59), (9, 76), (76, 60), (9, 46), (46, 31), (13, 77), (77, 61), (1, 78), (78, 62), (14, 79), (79, 63), (2, 80), (80, 64), (2, 81), (81, 65), (2, 47), (47, 32), (18, 48), (48, 33), (19, 49), (49, 34), (5, 98), (98, 7), (5, 99), (99, 12), (5, 100), (100, 15), (5, 101), (101, 17), (5, 102), (102, 0), (5, 103), (103, 3), (5, 104), (104, 9), (5, 105), (105, 14), (9, 106), (106, 8), (9, 107), (107, 4), (9, 108), (108, 13), (9, 109), (109, 2), (9, 110), (110, 18), (9, 111), (111, 19), (10, 112), (112, 5), (10, 113), (113, 11), (10, 114), (114, 6), (10, 115), (115, 16), (10, 116), (116, 1), (29, 82), (82, 60), (31, 83), (83, 54), (28, 84), (84, 53), (28, 85), (85, 63), (25, 86), (86, 56), (27, 87), (87, 55), (22, 88), (88, 51), (26, 89), (89, 50), (21, 90), (90, 62), (20, 91), (91, 52), (24, 92), (92, 64), (32, 93), (93, 61), (30, 94), (94, 65), (33, 95), (95, 58), (34, 96), (96, 57), (23, 97), (97, 59)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'Simulink'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"Flatten2\"\"\"\n self[\"GUID__\"] = UUID('3bd37131-b783-49da-b347-b00a25f97e1e')\n \n # Set the node attributes\n self.vs[0][\"Name\"] = \"\"\"Gain2\"\"\"\n self.vs[0][\"SampleTime\"] = -1.0\n self.vs[0][\"gain\"] = 5.4\n self.vs[0][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[0][\"Position\"] = pickle.loads(\"\"\"(lp1\nF405\naF99\naF445\naF131\na.\"\"\")\n self.vs[0][\"mm__\"] = \"\"\"Gain\"\"\"\n self.vs[0][\"GUID__\"] = UUID('aa88c5b8-9e26-46a0-ac27-c2ce5ead2aab')\n self.vs[1][\"NumInputPorts\"] = \"\"\"1\"\"\"\n self.vs[1][\"Name\"] = \"\"\"Scope\"\"\"\n self.vs[1][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[1][\"Position\"] = pickle.loads(\"\"\"(lp1\nF345\naF129\naF375\naF161\na.\"\"\")\n self.vs[1][\"mm__\"] = \"\"\"Scope\"\"\"\n self.vs[1][\"LimitDataPoints\"] = \"\"\"on\"\"\"\n self.vs[1][\"GUID__\"] = UUID('5b3d0f44-79dd-4361-baa2-e5158af03f75')\n self.vs[2][\"Name\"] = \"\"\"Sum\"\"\"\n self.vs[2][\"Inputs\"] = \"\"\"|++\"\"\"\n self.vs[2][\"SampleTime\"] = -1.0\n self.vs[2][\"IconShape\"] = \"\"\"round\"\"\"\n self.vs[2][\"BackgroundColor\"] = \"\"\"lightBlue\"\"\"\n self.vs[2][\"Position\"] = pickle.loads(\"\"\"(lp1\nF280\naF90\naF300\naF110\na.\"\"\")\n 
self.vs[2][\"mm__\"] = \"\"\"Sum\"\"\"\n self.vs[2][\"GUID__\"] = UUID('c3f1b72b-f864-4dc4-a9ec-4b1768272323')\n self.vs[3][\"Name\"] = \"\"\"Product2\"\"\"\n self.vs[3][\"SampleTime\"] = -1.0\n self.vs[3][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[3][\"Position\"] = pickle.loads(\"\"\"(lp1\nF185\naF177\naF215\naF208\na.\"\"\")\n self.vs[3][\"mm__\"] = \"\"\"Product\"\"\"\n self.vs[3][\"GUID__\"] = UUID('2116c172-b8c5-4f25-9cfb-c9a8bc23e063')\n self.vs[4][\"Name\"] = \"\"\"Product3\"\"\"\n self.vs[4][\"SampleTime\"] = -1.0\n self.vs[4][\"BackgroundColor\"] = \"\"\"lightBlue\"\"\"\n self.vs[4][\"Position\"] = pickle.loads(\"\"\"(lp1\nF225\naF127\naF255\naF158\na.\"\"\")\n self.vs[4][\"mm__\"] = \"\"\"Product\"\"\"\n self.vs[4][\"GUID__\"] = UUID('30f0a0a1-0c57-4801-8224-c52dc4871906')\n self.vs[5][\"Name\"] = \"\"\"Subsystem\"\"\"\n self.vs[5][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[5][\"Position\"] = pickle.loads(\"\"\"(lp1\nF145\naF89\naF245\naF131\na.\"\"\")\n self.vs[5][\"mm__\"] = \"\"\"SubSystem\"\"\"\n self.vs[5][\"GUID__\"] = UUID('5b78ddd3-6f58-47dd-8f61-985d21cf2e6d')\n self.vs[6][\"Name\"] = \"\"\"Constant\"\"\"\n self.vs[6][\"SampleTime\"] = inf\n self.vs[6][\"value\"] = 134.67\n self.vs[6][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[6][\"Position\"] = pickle.loads(\"\"\"(lp1\nF30\naF127\naF80\naF163\na.\"\"\")\n self.vs[6][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[6][\"GUID__\"] = UUID('a3b6dd66-2c10-4435-97f8-6bfd668c9675')\n self.vs[7][\"Name\"] = \"\"\"Constant2\"\"\"\n self.vs[7][\"SampleTime\"] = inf\n self.vs[7][\"value\"] = 12.34\n self.vs[7][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[7][\"Position\"] = pickle.loads(\"\"\"(lp1\nF175\naF120\naF220\naF150\na.\"\"\")\n self.vs[7][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[7][\"GUID__\"] = UUID('bc283ed6-240c-47ea-8c44-555e26976de9')\n self.vs[8][\"Name\"] = \"\"\"Constant\"\"\"\n self.vs[8][\"SampleTime\"] = inf\n self.vs[8][\"value\"] = 66598.0\n self.vs[8][\"BackgroundColor\"] = \"\"\"lightBlue\"\"\"\n self.vs[8][\"Position\"] = pickle.loads(\"\"\"(lp1\nF205\naF69\naF250\naF101\na.\"\"\")\n self.vs[8][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[8][\"GUID__\"] = UUID('47141a82-efb0-40f9-b21f-bc20e042605a')\n self.vs[9][\"Name\"] = \"\"\"Subsystem2\"\"\"\n self.vs[9][\"BackgroundColor\"] = \"\"\"lightBlue\"\"\"\n self.vs[9][\"Position\"] = pickle.loads(\"\"\"(lp1\nF270\naF134\naF370\naF176\na.\"\"\")\n self.vs[9][\"mm__\"] = \"\"\"SubSystem\"\"\"\n self.vs[9][\"GUID__\"] = UUID('8d319c42-24b2-4033-a93b-1769106af470')\n self.vs[10][\"Name\"] = \"\"\"Flatten2\"\"\"\n self.vs[10][\"Position\"] = pickle.loads(\"\"\"(lp1\n.\"\"\")\n self.vs[10][\"mm__\"] = \"\"\"SubSystem\"\"\"\n self.vs[10][\"GUID__\"] = UUID('3ff74440-7f12-4691-9bb4-fecc2804b8ca')\n self.vs[11][\"Name\"] = \"\"\"Out1\"\"\"\n self.vs[11][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[11][\"Position\"] = pickle.loads(\"\"\"(lp1\nF355\naF98\naF385\naF112\na.\"\"\")\n self.vs[11][\"mm__\"] = \"\"\"Outport\"\"\"\n self.vs[11][\"Port\"] = 1\n self.vs[11][\"GUID__\"] = UUID('37ae989e-8191-4230-800f-c25db780344b')\n self.vs[12][\"Name\"] = \"\"\"Out2\"\"\"\n self.vs[12][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[12][\"Position\"] = pickle.loads(\"\"\"(lp1\nF465\naF188\naF495\naF202\na.\"\"\")\n self.vs[12][\"mm__\"] = \"\"\"Outport\"\"\"\n self.vs[12][\"Port\"] = 2\n self.vs[12][\"GUID__\"] = UUID('b55605ee-5f95-43bb-bc15-517dcb5a6077')\n self.vs[13][\"Name\"] = \"\"\"Out1\"\"\"\n self.vs[13][\"BackgroundColor\"] = 
\"\"\"lightBlue\"\"\"\n self.vs[13][\"Position\"] = pickle.loads(\"\"\"(lp1\nF355\naF108\naF385\naF122\na.\"\"\")\n self.vs[13][\"mm__\"] = \"\"\"Outport\"\"\"\n self.vs[13][\"Port\"] = 1\n self.vs[13][\"GUID__\"] = UUID('2d73df35-44b9-4ae3-8a33-80439e9ea242')\n self.vs[14][\"Name\"] = \"\"\"Out1\"\"\"\n self.vs[14][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[14][\"Position\"] = pickle.loads(\"\"\"(lp1\nF475\naF108\naF505\naF122\na.\"\"\")\n self.vs[14][\"mm__\"] = \"\"\"Outport\"\"\"\n self.vs[14][\"Port\"] = 1\n self.vs[14][\"GUID__\"] = UUID('cc231818-18b3-4628-b567-61cecc568877')\n self.vs[15][\"Name\"] = \"\"\"In2\"\"\"\n self.vs[15][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[15][\"Position\"] = pickle.loads(\"\"\"(lp1\nF40\naF193\naF70\naF207\na.\"\"\")\n self.vs[15][\"mm__\"] = \"\"\"Inport\"\"\"\n self.vs[15][\"Port\"] = 2\n self.vs[15][\"GUID__\"] = UUID('48ee4de9-4f36-40a8-b9ea-91985af85c43')\n self.vs[16][\"Name\"] = \"\"\"In1\"\"\"\n self.vs[16][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[16][\"Position\"] = pickle.loads(\"\"\"(lp1\nF40\naF48\naF70\naF62\na.\"\"\")\n self.vs[16][\"mm__\"] = \"\"\"Inport\"\"\"\n self.vs[16][\"Port\"] = 1\n self.vs[16][\"GUID__\"] = UUID('abbcc9b5-a037-4543-94fd-e9e07898e0fd')\n self.vs[17][\"Name\"] = \"\"\"In1\"\"\"\n self.vs[17][\"BackgroundColor\"] = \"\"\"yellow\"\"\"\n self.vs[17][\"Position\"] = pickle.loads(\"\"\"(lp1\nF40\naF133\naF70\naF147\na.\"\"\")\n self.vs[17][\"mm__\"] = \"\"\"Inport\"\"\"\n self.vs[17][\"Port\"] = 1\n self.vs[17][\"GUID__\"] = UUID('73d6aff1-3f45-45c1-9c13-8bea418fc6e0')\n self.vs[18][\"Name\"] = \"\"\"In2\"\"\"\n self.vs[18][\"BackgroundColor\"] = \"\"\"lightBlue\"\"\"\n self.vs[18][\"Position\"] = pickle.loads(\"\"\"(lp1\nF115\naF158\naF145\naF172\na.\"\"\")\n self.vs[18][\"mm__\"] = \"\"\"Inport\"\"\"\n self.vs[18][\"Port\"] = 2\n self.vs[18][\"GUID__\"] = UUID('f910f910-3b72-4d34-ba33-b1005cba5f1e')\n self.vs[19][\"Name\"] = \"\"\"In1\"\"\"\n self.vs[19][\"BackgroundColor\"] = \"\"\"lightBlue\"\"\"\n self.vs[19][\"Position\"] = pickle.loads(\"\"\"(lp1\nF110\naF103\naF140\naF117\na.\"\"\")\n self.vs[19][\"mm__\"] = \"\"\"Inport\"\"\"\n self.vs[19][\"Port\"] = 1\n self.vs[19][\"GUID__\"] = UUID('775fc836-56be-481d-821a-ddb8ad3fcdf2')\n self.vs[20][\"Name\"] = \"\"\"1\"\"\"\n self.vs[20][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[20][\"GUID__\"] = UUID('09c29cf7-9e1d-494b-a475-dfc2d49a1888')\n self.vs[21][\"Name\"] = \"\"\"2\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[21][\"GUID__\"] = UUID('98e3375b-1e6b-4f23-a5b8-69ae5a078f66')\n self.vs[22][\"Name\"] = \"\"\"1\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[22][\"GUID__\"] = UUID('d059abe7-06b2-4d42-8eb2-13ec4f2b0605')\n self.vs[23][\"Name\"] = \"\"\"1\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[23][\"GUID__\"] = UUID('f9b1025f-94a8-4414-9e1f-0c8d88dfa1bb')\n self.vs[24][\"Name\"] = \"\"\"1\"\"\"\n self.vs[24][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[24][\"GUID__\"] = UUID('e7857c2e-3c19-4c69-b716-88ec14c15e2f')\n self.vs[25][\"Name\"] = \"\"\"1\"\"\"\n self.vs[25][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[25][\"GUID__\"] = UUID('c8c2d6da-7413-42d8-a87e-41c7a132be22')\n self.vs[26][\"Name\"] = \"\"\"1\"\"\"\n self.vs[26][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[26][\"GUID__\"] = UUID('16517dd7-a328-44cd-beea-2ef80dcae619')\n self.vs[27][\"Name\"] = \"\"\"1\"\"\"\n self.vs[27][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[27][\"GUID__\"] = 
UUID('d961915e-3cd7-4b60-80d6-8be1f5192e27')\n self.vs[28][\"Name\"] = \"\"\"1\"\"\"\n self.vs[28][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[28][\"GUID__\"] = UUID('e90742ed-92ec-4a96-b73d-d0193458fe9a')\n self.vs[29][\"Name\"] = \"\"\"1\"\"\"\n self.vs[29][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[29][\"GUID__\"] = UUID('9aaacc04-1328-483d-ae38-c5536bd24c00')\n self.vs[30][\"Name\"] = \"\"\"1\"\"\"\n self.vs[30][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[30][\"GUID__\"] = UUID('8cf56cf4-bde6-47bd-a01a-98948b37cc05')\n self.vs[31][\"Name\"] = \"\"\"1\"\"\"\n self.vs[31][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[31][\"GUID__\"] = UUID('23a56bf4-b95c-406e-a94a-9b1d95b08c95')\n self.vs[32][\"Name\"] = \"\"\"1\"\"\"\n self.vs[32][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[32][\"GUID__\"] = UUID('01de4a4e-867b-4fa2-88ab-18138ebb83c5')\n self.vs[33][\"Name\"] = \"\"\"1\"\"\"\n self.vs[33][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[33][\"GUID__\"] = UUID('be0b168e-5e87-4c60-b243-ae86ae4470fd')\n self.vs[34][\"Name\"] = \"\"\"1\"\"\"\n self.vs[34][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[34][\"GUID__\"] = UUID('ba8ba12b-7ae9-42c8-bcab-59c39b7219c9')\n self.vs[35][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[35][\"GUID__\"] = UUID('2b94a8e3-5dc8-4ef9-a369-9fa28dfa4a25')\n self.vs[36][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[36][\"GUID__\"] = UUID('b0686df7-b969-42bc-8321-34d0785ae81f')\n self.vs[37][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[37][\"GUID__\"] = UUID('e98f9e88-df30-44e1-a37c-585d02b58d3a')\n self.vs[38][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[38][\"GUID__\"] = UUID('9e379931-decd-49d3-a71d-81ddb0393c9f')\n self.vs[39][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[39][\"GUID__\"] = UUID('9e25ae89-9a4f-4d34-87a9-fdbd86781309')\n self.vs[40][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[40][\"GUID__\"] = UUID('bc892a1a-16d0-45b1-8d24-e9e45706d26a')\n self.vs[41][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[41][\"GUID__\"] = UUID('3880bb62-5210-410c-80e1-1658b01a8a8d')\n self.vs[42][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[42][\"GUID__\"] = UUID('982d02b4-bb03-41fc-b77e-5fc3f575a85c')\n self.vs[43][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[43][\"GUID__\"] = UUID('0cdd9c41-72cb-4321-bc3b-2629c260ca43')\n self.vs[44][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[44][\"GUID__\"] = UUID('8871d75b-0be0-4e76-a709-eb7e61949647')\n self.vs[45][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[45][\"GUID__\"] = UUID('b5b05072-d6a5-4d70-9b73-211a77b53684')\n self.vs[46][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[46][\"GUID__\"] = UUID('30d22c6e-df70-49bd-96e2-abd1a927077e')\n self.vs[47][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[47][\"GUID__\"] = UUID('a1772768-d323-45fa-b7ef-095dd4fa24aa')\n self.vs[48][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[48][\"GUID__\"] = UUID('092ee6ee-095f-454e-b6e7-34332f8a27a0')\n self.vs[49][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[49][\"GUID__\"] = UUID('8ef11b47-2e19-475d-b004-ff80e618ac28')\n self.vs[50][\"Name\"] = \"\"\"1\"\"\"\n self.vs[50][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[50][\"GUID__\"] = UUID('c21cd5ea-4f2e-4c79-a7b2-b1ededf7224f')\n self.vs[51][\"Name\"] = \"\"\"2\"\"\"\n self.vs[51][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[51][\"GUID__\"] = UUID('f2f40662-6db0-45b6-99f7-faf9d0826cb0')\n self.vs[52][\"Name\"] = \"\"\"1\"\"\"\n self.vs[52][\"mm__\"] = \"\"\"Port_Input\"\"\"\n 
self.vs[52][\"GUID__\"] = UUID('a86461b0-f516-4b01-a8b9-df002de2936c')\n self.vs[53][\"Name\"] = \"\"\"1\"\"\"\n self.vs[53][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[53][\"GUID__\"] = UUID('d00fb4a0-24cc-43c8-a30b-2630fc5b5576')\n self.vs[54][\"Name\"] = \"\"\"1\"\"\"\n self.vs[54][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[54][\"GUID__\"] = UUID('0a914718-ec1c-42d8-9d25-e8921e969ac1')\n self.vs[55][\"Name\"] = \"\"\"1\"\"\"\n self.vs[55][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[55][\"GUID__\"] = UUID('0e7f61a7-ab89-4775-90ab-401bfdf9acb9')\n self.vs[56][\"Name\"] = \"\"\"2\"\"\"\n self.vs[56][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[56][\"GUID__\"] = UUID('1b8f219a-d034-478c-8239-ae16bcfe3b24')\n self.vs[57][\"Name\"] = \"\"\"1\"\"\"\n self.vs[57][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[57][\"GUID__\"] = UUID('5af6ee33-6a1c-4c8e-8d75-2a76393c2610')\n self.vs[58][\"Name\"] = \"\"\"2\"\"\"\n self.vs[58][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[58][\"GUID__\"] = UUID('9d78e402-c0c7-457e-83f9-aee3dca00144')\n self.vs[59][\"Name\"] = \"\"\"1\"\"\"\n self.vs[59][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[59][\"GUID__\"] = UUID('68269617-a0a6-4804-9a5f-ce2575dd17d9')\n self.vs[60][\"Name\"] = \"\"\"2\"\"\"\n self.vs[60][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[60][\"GUID__\"] = UUID('bdebfbac-2308-4f82-a610-4903c6b126be')\n self.vs[61][\"Name\"] = \"\"\"1\"\"\"\n self.vs[61][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[61][\"GUID__\"] = UUID('cb37b8bb-0d28-4954-9ade-e1c58e36deb0')\n self.vs[62][\"Name\"] = \"\"\"1\"\"\"\n self.vs[62][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[62][\"GUID__\"] = UUID('3efb5d21-0e4a-4f35-9f13-33f5269c5d27')\n self.vs[63][\"Name\"] = \"\"\"1\"\"\"\n self.vs[63][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[63][\"GUID__\"] = UUID('7480d4ea-e5c9-4369-8beb-44a82010a9f4')\n self.vs[64][\"Name\"] = \"\"\"1\"\"\"\n self.vs[64][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[64][\"GUID__\"] = UUID('b8d9a531-9b5e-4ab2-a4a9-f1910367b255')\n self.vs[65][\"Name\"] = \"\"\"2\"\"\"\n self.vs[65][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[65][\"GUID__\"] = UUID('a82e9ec6-04f3-4921-ab95-672320b1c54f')\n self.vs[66][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[66][\"GUID__\"] = UUID('f0398ee2-f9fe-4c0f-8b07-d64be73a3c3b')\n self.vs[67][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[67][\"GUID__\"] = UUID('f9356434-73eb-412b-a349-3b41dda3a1f9')\n self.vs[68][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[68][\"GUID__\"] = UUID('8b93f3e8-8b35-4950-b6db-99071419c97a')\n self.vs[69][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[69][\"GUID__\"] = UUID('580eebf0-8650-40d5-ac8c-9ebc4611d8b4')\n self.vs[70][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[70][\"GUID__\"] = UUID('3c00ad24-ff30-49ba-8aa9-a489e92ac971')\n self.vs[71][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[71][\"GUID__\"] = UUID('ad7f53ea-df4a-42dd-927c-dee91a28c68f')\n self.vs[72][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[72][\"GUID__\"] = UUID('18e453f9-715a-4c21-810e-db6c14ea391e')\n self.vs[73][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[73][\"GUID__\"] = UUID('d57011fb-5626-45e0-9720-dfeeec025492')\n self.vs[74][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[74][\"GUID__\"] = UUID('329d90a2-8091-435f-a230-e66273f96ad4')\n self.vs[75][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[75][\"GUID__\"] = UUID('85e5ff0f-bb4e-4ffc-8547-a2d3339668ad')\n self.vs[76][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[76][\"GUID__\"] = 
UUID('242a9924-011c-4ca0-a14e-ff940d8470e6')\n self.vs[77][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[77][\"GUID__\"] = UUID('25a81afa-35ec-4361-9fb2-b0fab39f0e74')\n self.vs[78][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[78][\"GUID__\"] = UUID('72daf75d-a55c-4da8-b6fa-540ecc5890fe')\n self.vs[79][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[79][\"GUID__\"] = UUID('85222c53-252e-481b-92cd-367af4ff2bc6')\n self.vs[80][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[80][\"GUID__\"] = UUID('1babbcb5-911d-46e9-b491-c2db5ee4c8f2')\n self.vs[81][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[81][\"GUID__\"] = UUID('c53cd074-98e0-4a02-804e-d36a8729174c')\n self.vs[82][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[82][\"GUID__\"] = UUID('3acc69e0-9e76-4e28-adc6-a0542777972c')\n self.vs[83][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[83][\"GUID__\"] = UUID('3ce8d214-0b7f-41a6-b852-f91c45b393ce')\n self.vs[84][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[84][\"GUID__\"] = UUID('472527a3-dc6c-48bf-a61c-174e136fd519')\n self.vs[85][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[85][\"GUID__\"] = UUID('c025134c-d29e-4a05-a487-9c34655d05c8')\n self.vs[86][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[86][\"GUID__\"] = UUID('177e3050-d372-4d20-8769-cf3cfc1c4f89')\n self.vs[87][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[87][\"GUID__\"] = UUID('b051d0ba-e75c-4e93-a75d-3fdbda8b13e6')\n self.vs[88][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[88][\"GUID__\"] = UUID('59f711e9-c681-42f8-99f4-fd5d5ed4e60b')\n self.vs[89][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[89][\"GUID__\"] = UUID('20d2d0cd-3e4a-41c1-b825-e4272a79b938')\n self.vs[90][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[90][\"GUID__\"] = UUID('5bae399e-9a12-4b57-a2b9-14a03192e5ed')\n self.vs[91][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[91][\"GUID__\"] = UUID('22db28a4-4de4-4dd4-9f32-c3e09badff15')\n self.vs[92][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[92][\"GUID__\"] = UUID('8f2fa4e8-ed1f-43d7-9827-8b99db4ef332')\n self.vs[93][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[93][\"GUID__\"] = UUID('1e048894-952a-48e8-9d84-0c4527393ca2')\n self.vs[94][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[94][\"GUID__\"] = UUID('be223435-891f-4466-b9a5-cdec06256b63')\n self.vs[95][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[95][\"GUID__\"] = UUID('6b94ff1d-1cce-4ec9-a298-c94f259741ec')\n self.vs[96][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[96][\"GUID__\"] = UUID('05a63986-edc0-49b1-9365-69860d0a89d4')\n self.vs[97][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[97][\"GUID__\"] = UUID('4a932950-2fab-4ce3-9767-484dbe084290')\n self.vs[98][\"Name\"] = \"\"\"None\"\"\"\n self.vs[98][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[98][\"GUID__\"] = UUID('a31037cd-dace-43cf-9987-8a0610c0c07f')\n self.vs[99][\"Name\"] = \"\"\"None\"\"\"\n self.vs[99][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[99][\"GUID__\"] = UUID('ea24b961-26eb-4c44-93d4-0f15cad67bab')\n self.vs[100][\"Name\"] = \"\"\"None\"\"\"\n self.vs[100][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[100][\"GUID__\"] = UUID('5e671a7c-7539-41af-958c-fe48d4e31809')\n self.vs[101][\"Name\"] = \"\"\"None\"\"\"\n self.vs[101][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[101][\"GUID__\"] = UUID('9749ed46-6409-4b18-8057-36f1d9a6ef1c')\n self.vs[102][\"Name\"] = \"\"\"None\"\"\"\n self.vs[102][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[102][\"GUID__\"] = UUID('36ab22fb-634f-47ca-b65d-e8dc064fd022')\n self.vs[103][\"Name\"] = 
\"\"\"None\"\"\"\n self.vs[103][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[103][\"GUID__\"] = UUID('daed977f-8833-405c-b5a9-511c3cf7b53a')\n self.vs[104][\"Name\"] = \"\"\"None\"\"\"\n self.vs[104][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[104][\"GUID__\"] = UUID('7ee00228-b980-4c88-8149-dc4881379102')\n self.vs[105][\"Name\"] = \"\"\"None\"\"\"\n self.vs[105][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[105][\"GUID__\"] = UUID('d8832334-a7ee-415c-b24c-26eadc8935be')\n self.vs[106][\"Name\"] = \"\"\"None\"\"\"\n self.vs[106][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[106][\"GUID__\"] = UUID('3b4c3970-2d19-4742-85c1-b83094b4a3b4')\n self.vs[107][\"Name\"] = \"\"\"None\"\"\"\n self.vs[107][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[107][\"GUID__\"] = UUID('ea32d964-6098-4204-9e7a-6a62dd1184bd')\n self.vs[108][\"Name\"] = \"\"\"None\"\"\"\n self.vs[108][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[108][\"GUID__\"] = UUID('ae5f7a4a-3ba4-449e-a8d5-453cd67010b9')\n self.vs[109][\"Name\"] = \"\"\"None\"\"\"\n self.vs[109][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[109][\"GUID__\"] = UUID('c8b62e5b-34a8-47b4-8720-d3d25e8f5dd7')\n self.vs[110][\"Name\"] = \"\"\"None\"\"\"\n self.vs[110][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[110][\"GUID__\"] = UUID('59e6c5dc-1412-4ee8-8faf-431f82283f4b')\n self.vs[111][\"Name\"] = \"\"\"None\"\"\"\n self.vs[111][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[111][\"GUID__\"] = UUID('248f6796-1962-4699-ada5-0dbcbdead522')\n self.vs[112][\"Name\"] = \"\"\"None\"\"\"\n self.vs[112][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[112][\"GUID__\"] = UUID('0da03d23-08bb-4c83-ad76-a3bc789442de')\n self.vs[113][\"Name\"] = \"\"\"None\"\"\"\n self.vs[113][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[113][\"GUID__\"] = UUID('6bc7010c-4f9b-444d-80ea-c17bfa7b86df')\n self.vs[114][\"Name\"] = \"\"\"None\"\"\"\n self.vs[114][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[114][\"GUID__\"] = UUID('b553d6be-a275-4e58-b106-e3a1e5294b9b')\n self.vs[115][\"Name\"] = \"\"\"None\"\"\"\n self.vs[115][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[115][\"GUID__\"] = UUID('9a4025bf-92c3-4602-a0e5-75d273769abd')\n self.vs[116][\"Name\"] = \"\"\"None\"\"\"\n self.vs[116][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[116][\"GUID__\"] = UUID('0d38375f-caf8-42a6-a4db-a5d72cd034c6')", "def read_graph(filename):\n return nx.read_edgelist(filename, create_using=nx.DiGraph(), nodetype=str)" ]
[ "0.6980027", "0.5925386", "0.56901735", "0.5565185", "0.5561407", "0.5539062", "0.55311584", "0.55090475", "0.54942447", "0.54745305", "0.54636663", "0.5446424", "0.543744", "0.54293144", "0.54181665", "0.5398218", "0.5389438", "0.5359855", "0.5344951", "0.5344879", "0.5318164", "0.53167665", "0.53122747", "0.5311478", "0.5289722", "0.52754915", "0.5258244", "0.5217457", "0.52146727", "0.5208391", "0.51965845", "0.5196137", "0.51846576", "0.516229", "0.51469857", "0.5146489", "0.5145111", "0.5140835", "0.5125298", "0.51100093", "0.5105245", "0.51045495", "0.51007146", "0.50946814", "0.50811714", "0.5063938", "0.505715", "0.50527525", "0.5049969", "0.50493574", "0.5038802", "0.5028762", "0.5013838", "0.50035316", "0.499624", "0.4983743", "0.4981724", "0.49719712", "0.49692294", "0.4968056", "0.49676618", "0.49592215", "0.49554113", "0.49537185", "0.49488825", "0.49463084", "0.49447814", "0.4937619", "0.49370936", "0.49152374", "0.49107403", "0.4908379", "0.49058425", "0.49056983", "0.49040148", "0.4893319", "0.48857182", "0.48833156", "0.4877522", "0.4874064", "0.48696932", "0.4868638", "0.4868004", "0.48675376", "0.48629385", "0.48608977", "0.48602355", "0.48512107", "0.4849479", "0.48481092", "0.4842463", "0.48414475", "0.48353654", "0.48343801", "0.48282194", "0.48275778", "0.48209497", "0.48202956", "0.4814363", "0.48133805", "0.4804099" ]
0.0
-1
Read a file and return a toposorted hypergraph.
def deserialize(self, filename): f = open(filename) edges_tails = [] nodes = [] # first pass adds incoming edges to nodes for line in f: if '->' in line: # edge edge = self.edge_class() tail_ids, head_id = edge.deserialize(line) nodes[head_id].add_incoming(edge) edges_tails.append((edge, tail_ids)) else: # node node = self.node_class() node.deserialize(line) assert node.id == len(nodes), 'nodes shall appear in order' nodes.append(node) # second pass adds tail nodes to edges for edge, tail_ids in edges_tails: for nid in tail_ids: edge.add_tail(nodes[nid]) f.close() # make a toposorted hypergraph hg = Hypergraph(nodes[0]) hg.nodes = nodes for node in hg: node.hg = hg for edge in hg.edges(): edge.hg = hg hg.tasks_done.add('topo_sort') return hg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_graph(filename):\n G = Hypergraph()\n\n f = open(filename, 'r', encoding='utf8')\n lines = f.readlines()\n if args.weighted:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n weight = line[1]\n G.add_edge(edge_name, line[2:], float(weight))\n else:\n for line in lines:\n line = line.split()\n edge_name = line[0]\n G.add_edge(edge_name, line[1:])\n f.close()\n return G", "def read_graph(file_name):\r\n with open(file_name, 'r') as f:\r\n lines = f.readlines()\r\n first_line = lines[0].strip().split()\r\n no_vertices = int(first_line[0])\r\n new_graph = UndirectedGraph(no_vertices)\r\n for line in lines[1:]:\r\n if line == \"\":\r\n continue\r\n line = line.strip().split()\r\n _from, _to, _cost = int(line[0]), int(line[1]), int(line[2])\r\n new_graph.add_edge(_from, _to, _cost)\r\n return new_graph", "def read_file(path):\n\tG = nx.Graph()\n\n\twith open(path, 'r') as in_file:\n\t\tfor line in in_file:\n\t\t\tcontents = line.split(\" \")\n\t\t\tu = int(contents[0])\n\t\t\tv = int(contents[1])\n\t\t\tstreet_type = int(contents[2])\n\t\t\ttime = int(contents[3])\n\t\t\tlength = int(contents[4])\n\t\t\tcost = 1/float(length)\n\t\t\t\n\t\t\tG.add_node(u)\n\t\t\tG.add_node(v)\n\t\t\tif street_type is 1:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\telse:\n\t\t\t\tG.add_edge(u, v, street_type=street_type, time=time, length=length, cost=cost)\n\t\t\t\tG.add_edge(v, u, street_type=street_type, time=time, length=length, cost=cost)\n\n\treturn G", "def read_graph(filename):\n with open(filename) as f:\n g = eval(f.read())\n return g", "def read_graph():\n return nx.read_edgelist('edges.txt.gz', delimiter='\\t')", "def read_graph(filename):\n return nx.read_edgelist(filename, create_using=nx.DiGraph(), nodetype=str)", "def read_graph():\n return nx.read_edgelist('edges_new.txt', delimiter='\\t')", "def read_graph(filename):\n with open(filename, 'r') as file: # open the file\n # read the number of nodes and number of edges\n num_nodes, num_edges = DataIO.__preprocess_line(file.readline())\n graph = GraphProcessing.construct_null_graph(num_nodes) # construct a null graph\n for line in file.readlines(): # for every line in the file\n preprocessed_line = DataIO.__preprocess_line(line) # preprocess the line\n if preprocessed_line: # if the preprocessed line is not a null string\n # read the first and second node and the edge weight\n source_node, terminal_node, weight = preprocessed_line\n graph[source_node][terminal_node] = weight\n graph[terminal_node][source_node] = weight\n return graph # return the final graph", "def read_graph(filename, directed=True):\n if not directed:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n with open(filename) as f:\n for line in f:\n d = line.split()\n G.add_edge(int(d[0]), int(d[1]))\n print('Read Graph')\n return G", "def read_graph(filename, node_index_one=0, node_index_two=1):\n tsv = csv.reader(open(filename), delimiter='\\t')\n return make_graph(tsv, node_index_one, node_index_two)", "def read_input_from_file(f):\n f.readline()\n size = int(f.readline().split()[-1])\n nb_edges = int(f.readline().split()[-1])\n\n g = UndirectedGraph()\n\n if parameters.DEBUG:\n print('Build nodes')\n\n nodes = [g.add_node() for _ in range(size)]\n\n if parameters.DEBUG:\n print('Build edges')\n edges = []\n weights = {}\n i = 0\n for i in range(nb_edges):\n if parameters.DEBUG:\n i += 1\n if i % 1000 == 0:\n print('Edge %d / %d' % (i, nb_edges))\n line = f.readline()\n _, u, v, w = line.split()\n\n e = 
g.add_edge(nodes[int(u) - 1], nodes[int(v) - 1])\n weights[e] = int(w)\n\n edges.append((int(u), int(v), int(w)))\n\n line = f.readline()\n while 'Terminals' not in line:\n line = f.readline()\n if 'SECTION' in line:\n line = f.readline()\n while 'Terminals' not in line:\n line = f.readline()\n nb_terms = int(line.split()[-1])\n terms = []\n for i in range(nb_terms):\n line = f.readline()\n _, t = line.split()\n terms.append(nodes[int(t) - 1])\n\n return instances.SteinerInstance(g, terms, weights)", "def file_parse():\n\n\tfilename = input(\"Enter the file path for your graph: \")\n\ttarget = open(filename, 'r')\n\n\ttarget_lines = [] \t# List of lines from target file\n\t\n\t# Grab the graph count and node/edge count for the first graph\n\ti = 0\n\tfor line in target:\n\t\tif i == 0:\n\t\t\tgraph_count = int(line)\n\t\telif i == 1:\n\t\t\tnode_count = int(line)\n\t\telif i == 2:\n\t\t\tedge_count = int(line)\n\t\telse:\t\n\t\t\ttarget_lines.append(line.strip('\\n'))\n\t\ti += 1\n\n\treturn graph_create(target_lines, graph_count, node_count, edge_count)", "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "def loadDataZachary(fileName):\n\n \"Initialize a graph\"\n G = nx.Graph()\n\n \"Open file\"\n f = open(fileName)\n\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n while line:\n if(line[0]!=\"%\"):\n ls =line.split(' ')\n num,nums=int(ls[0]),int(ls[1])\n G.add_edge(num,nums)\n line = f.readline().rstrip(\"\\n\").rstrip(\"\\r\")\n\n \"Closing the file\"\n f.close()\n\n return G, 'Zachary'", "def build_graph(file_name):\n graph = MyGraph()\n with open(file_name, 'r') as fin:\n line = fin.readline().replace('\\n', '')\n while line != \"\":\n vals = line.split(':')\n graph.add_node(vals[0], pos=(int(vals[1]),int(vals[2])))\n line = fin.readline().replace('\\n', '')\n dest = fin.readline().replace('\\n','').split('\\t')\n line = fin.readline().replace('\\n', '')\n edges = []\n while line != '':\n node_info = line.split('\\t')\n src = node_info[0]\n for node in range(1,len(node_info)):\n if node_info[node] != '':\n if (dest[node],src) not in edges:\n edges.append((src,dest[node], node_info[node]))\n line = fin.readline().replace('\\n','')\n for edge in edges:\n graph.add_edge(edge[0], edge[1], weight=int(edge[2]))\n\n return graph", "def read_graph_file(filename):\n nodes, edges = [], []\n with open(filename) as f1:\n numNodes = int(f1.readline())\n numEdges = int(f1.readline())\n nodes = np.zeros([numNodes,3], dtype=\"float32\")\n edges = np.zeros([numEdges,2], dtype=\"int32\")\n nodeCount = 0\n edgeCount = 0\n for line in f1:\n parts = line.split(\" \")\n if len(parts) == 4:\n # node line\n nodes[nodeCount] = (float(parts[0]), float(parts[1]), int(parts[3])) \n nodeCount += 1\n elif len(parts) == 3:\n # edge line\n edges[edgeCount] = (int(parts[0]), int(parts[1])) \n edgeCount += 1\n return nodes, edges", "def build_graph(filepath):\n graph = defaultdict(list)\n with open(filepath, 'r') as file:\n for edge in file:\n head, tail = edge.split()\n graph[head].append(tail)\n return graph", "def load_graph(file_name, directed=True):\n G = nx.DiGraph() if directed else nx.Graph()\n with open(file_name, \"r\") as f:\n for line in f:\n tokens = line.split()\n u = int(tokens[0])\n v = int(tokens[1])\n if len(tokens) > 2:\n w = float(tokens[2])\n G.add_edge(u, v, weight=w)\n else:\n G.add_edge(u,v)\n return G", "def from_file(cls, filename: str, directed = False):\n with open(filename) as fh:\n vertnum = 
int(fh.readline().strip())\n int(fh.readline().strip())\n graph = Graph(vertnum, directed)\n\n for line in fh:\n numstr = line.split()\n v1 = int(numstr[0])\n v2 = int(numstr[1])\n graph.add_edge(v1, v2)\n\n return graph", "def read_graph(path):\n edge_list = pd.read_csv(path).values.tolist()\n graph = nx.from_edgelist(edge_list)\n return graph", "def read_file():\n\tgraph = {}\n\twith open('data/SCC.txt', 'r') as f:\n\t\told_index = '1'\n\t\tadjacency_list = []\n\t\tfor line in f:\n\t\t\tdata = line.split()\n\t\t\tnew_index = data[0]\n\t\t\tif old_index != new_index:\n\t\t\t\tgraph[old_index] = {'adj_nodes': adjacency_list, 'is_explored': False}\n\t\t\t\told_index = new_index\n\t\t\t\tadjacency_list = []\n\t\t\tadjacency_list.append(data[1])\n\t\tgraph[old_index] = {'adj_nodes': adjacency_list, 'is_explored': False}\n\n\tfor i in range(1, NUM_VERT + 1):\n\t\tif graph.get(str(i), False) is False:\n\t\t\tgraph[str(i)] = {'adj_nodes': [], 'is_explored': False}\n\treturn graph", "def create_graph(path):\n f = open(path, 'r')\n g = nx.DiGraph()\n\n # Velikost mnozice\n n = int(f.readline().split(\" \")[0])\n\n # Dodamo vsa vozlisca v graf\n for i in range(n):\n g.add_node(i+1)\n\n # Gremo cez vse primerjave in dodamo povezave v graf\n for line in f:\n u, v = line.strip().split(\" \")\n u, v = int(u), int(v)\n g.add_edge(u, v)\n\n return g", "def get_graph(path: str) -> nx.Graph:\n with open(path, 'r') as f:\n list_of_edges = [line.strip().split() for line in f.readlines()]\n g = nx.Graph()\n g.add_edges_from(list_of_edges)\n return g", "def read_graph(filename):\n\n print(\"\\n\\n========== Loading graph: \" + filename + '==================')\n edges = {}\n\n inFile = open(filename)\n for line in inFile:\n roadInfo = line.split()\n\n # Skip blank lines, read in contents from non-empty lines.\n if (len(roadInfo) > 0):\n srcCity = roadInfo[0]\n destCity = roadInfo[1]\n\n if srcCity in edges:\n edges[srcCity] = edges[srcCity] + [destCity]\n else:\n edges[srcCity] = [destCity]\n\n if destCity in edges:\n edges[destCity] = edges[destCity] + [srcCity]\n else:\n edges[destCity] = [srcCity]\n\n print(\" done.\\n\")\n return edges", "def ParseGraph(filename):\n vertices = []\n edges = set([])\n\n for l in open(filename):\n fields = [int(f) for f in l.split()]\n vertex = fields.pop(0)\n incident = [tuple(sorted([vertex, f])) for f in fields]\n vertices.append(vertex)\n edges.update(incident)\n\n return vertices, list(edges)", "def read(self):\n\t\tentities = dict()\n\t\trelations = set()\n\t\tedges = set()\n\t\twith open(self.file_path, encoding=\"utf-8\") as f:\n\t\t\tfor line in tqdm(f):\n\t\t\t\tif(self.prob == 1.0 or random() < self.prob):\n\t\t\t\t\tsource, relation, target, _ = line.split(\" \", 3)\n\t\t\t\t\tis_dataprop = target.startswith('\"')\n\t\t\t\t\tif source not in entities:\n\t\t\t\t\t\tentities[source] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\tentities[source][\"out_degree\"] += 1\n\t\t\t\t\tentities[source][\"degree\"] += 1\n\t\t\t\t\tif not is_dataprop:\n\t\t\t\t\t\tif target not in entities:\n\t\t\t\t\t\t\tentities[target] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\t\tentities[target][\"in_degree\"] += 1\n\t\t\t\t\t\tentities[target][\"degree\"] += 1\n\t\t\t\t\t\trelations.add(relation)\n\t\t\t\t\t\tedges.add((relation, source, target))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(self.include_dataprop):\n\t\t\t\t\t\t\tentities[source][\"data_properties\"][relation] = target\n\n\t\treturn (entities, relations, edges)", "def 
read_graph_g2o(filename):\n Edge = namedtuple(\n 'Edge', ['Type', 'fromNode', 'toNode', 'measurement', 'information'])\n edges = []\n nodes = {}\n with open(filename, 'r') as file:\n for line in file:\n data = line.split()\n\n if data[0] == 'VERTEX_SE2':\n nodeId = int(data[1])\n pose = np.array(data[2:5], dtype=np.float32)\n nodes[nodeId] = pose\n\n elif data[0] == 'VERTEX_XY':\n nodeId = int(data[1])\n loc = np.array(data[2:4], dtype=np.float32)\n nodes[nodeId] = loc\n\n elif data[0] == 'EDGE_SE2':\n Type = 'P'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:6], dtype=np.float32)\n uppertri = np.array(data[6:12], dtype=np.float32)\n information = np.array(\n [[uppertri[0], uppertri[1], uppertri[2]],\n [uppertri[1], uppertri[3], uppertri[4]],\n [uppertri[2], uppertri[4], uppertri[5]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n elif data[0] == 'EDGE_SE2_XY':\n Type = 'L'\n fromNode = int(data[1])\n toNode = int(data[2])\n measurement = np.array(data[3:5], dtype=np.float32)\n uppertri = np.array(data[5:8], dtype=np.float32)\n information = np.array([[uppertri[0], uppertri[1]],\n [uppertri[1], uppertri[2]]])\n edge = Edge(Type, fromNode, toNode, measurement, information)\n edges.append(edge)\n\n else:\n print('VERTEX/EDGE type not defined')\n\n # compute state vector and lookup table\n lut = {}\n x = []\n offset = 0\n for nodeId in nodes:\n lut.update({nodeId: offset})\n offset = offset + len(nodes[nodeId])\n x.append(nodes[nodeId])\n x = np.concatenate(x, axis=0)\n\n # collect nodes, edges and lookup in graph structure\n graph = Graph(x, nodes, edges, lut)\n print('Loaded graph with {} nodes and {} edges'.format(\n len(graph.nodes), len(graph.edges)))\n\n return graph", "def load_graph(path, n):\n\n f = open(path)\n g = nx.Graph()\n\n for line in f:\n parts = line.split()\n node1 = int(parts[0])\n node2 = int(parts[1])\n\n if node1 >= n:\n break\n\n if node1 <= n and node2 <= n:\n g.add_edge(node1, node2)\n\n f.close()\n return g", "def make_graph_from_file(filename):\n file = open(filename, \"r\")\n lines = file.readlines()\n file.close()\n\n # Check if it is a graph or digraph\n graph_or_digraph_str = lines[0].strip() if len(lines) > 0 else None\n if graph_or_digraph_str != \"G\" and graph_or_digraph_str != \"D\":\n raise Exception(\"File must start with G or D.\")\n is_bidirectional = graph_or_digraph_str == \"G\"\n\n g = Graph()\n\n # Add all vertices\n for vertex_key in lines[1].strip(\"() \\n\").split(\",\"):\n g.add_vertex(vertex_key)\n\n # Add all edges\n for line in lines[2:]:\n # Split components of edge\n new_edge = line.strip(\"() \\n\").split(\",\")\n if len(new_edge) < 2 or len(new_edge) > 3:\n raise Exception(\"Lines adding edges must include 2 or 3 values\")\n\n # Get vertices\n vertex1, vertex2 = new_edge[:2]\n\n # Get weight if it exists\n weight = int(new_edge[2]) if len(new_edge) == 3 else None\n\n # Add edge(s)\n g.add_edge(vertex1, vertex2, weight)\n if is_bidirectional:\n g.add_edge(vertex2, vertex1, weight)\n\n return g\n # Check if first line is 'G' or 'D' and store the value. 
If neither, raise an exception\n # For each vertex id in first line, add a vertex to the graph\n # For each of the following lines:\n # Extract the vertex ids and the (optional) weight, and add an edge to the graph\n # If it is a Graph and not a Digraph, add another edge in the opposite direction\n # Raise an exception if line contains too many (or too few) item\n raise Exception(f\"File must begin with G or D, found {firstline}\")", "def parse_graphml_file(filename: str, digraph=True):\n graphml_graph = nx.read_graphml(filename)\n if digraph:\n graphml_graph = graphml_graph.to_directed()\n\n return graphml_graph", "def loadNetworkFromFile(self, file):\r\n for line in open(file, 'r'):\r\n fromVertex, toVertex, capacity = map(int, line.split())\r\n self.addEdge(fromVertex, toVertex, capacity)", "def load_graph(file_path):\n with open(file_path, 'rb') as dicts_file:\n graph = pickle.load(dicts_file)\n return collections.OrderedDict(sorted(graph.items()))", "def construct_hypermap_from_file(path):\n r = {}\n with open(path, \"r\") as f:\n for line in f.readlines():\n id, theta, radius = line.split()\n p = HybolicPara(radius=float(radius), theta=float(theta))\n r[id] = p\n logging.debug(\"{} nodes and hyperbolic parameter loaded from {}.\".format\n (len(r), path))\n return r", "def read(path):\n result = []\n for line in open(path):\n width, gap, edge = [float(e) / 10 for e in line.split(',')]\n # we can avoid edge length since we know height and width of paralellogram\n result.append((width, gap))\n result.reverse()\n return result", "def read_metis_graph(filename):\n graph = nx.Graph()\n myfile = open(filename, 'r')\n line_number = 0\n for line in myfile:\n line_number += 1\n line_split = [int(i) for i in line.split()]\n if line_number == 1:\n no_nodes = line_split[0]\n no_edges = line_split[1]\n else:\n node_weight = int(line_split[0])\n node = line_number - 2 # node index start at 0\n graph.add_node(node)\n graph.node[node]['volume'] = node_weight\n for i in xrange(1, len(line_split)-1, 2):\n neigh = int(line_split[i])-1 # minus 1 for node indexing\n edge_weight = int(line_split[i+1])\n graph.add_edge(node, neigh, weight=edge_weight) \n assert nx.number_of_nodes(graph) == no_nodes\n assert nx.number_of_edges(graph) == no_edges\n return graph", "def graph_reader(path):\n graph = nx.from_edgelist(pd.read_csv(path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def _read_data(filename):\n file = open(filename, \"r\")\n timestamps = []\n edges = []\n for line in file:\n # source target weight timestamp\n if line.startswith(\"%\"):\n continue\n spl = line.split()\n if len(spl) == 4:\n # store that stuff in triples (source, target, weight, timestamp)\n edges.append((int(spl[0]), int(spl[1]), int(spl[2]), int(spl[3])))\n timestamps.append(int(spl[3]))\n return edges, sorted(timestamps)", "def dependency_parse_to_graph(filename):\n data = ''\n dtree = []\n with open(filename, 'r') as f:\n for line in f:\n if line[0] != '#':\n if 'root' in line:\n elements = line.split('\\t')\n if elements[7] == 'root':\n elements[7] = 'ROOT'\n line = '\\t'.join(elements)\n data += line\n if line == '\\n':\n dg = DependencyGraph(data.decode('utf8'))\n dtree.append(dg)\n data = ''\n return dtree", "def build_from_file(self, topology_file, topology_format):\n with open(topology_file) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n else:\n if topology_format == 0:\n x = line.split(\"\\n\")[0].split(\"|\")\n as1 = int(x[0])\n as2 = int(x[1])\n 
relationship = int(x[2])\n else:\n x = line.split(\"\\n\")[0].split(\"\\t\")\n if x[2] == \"p2c\":\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = -1\n elif x[2] == \"c2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = -1\n elif x[2] == \"p2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = 0\n else:\n continue\n\n if not self.has_edge(as1, as2):\n self.add_edge(as1, as2, relationship=relationship, as1=as1, as2=as2)", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. 
Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def get_table_from_file(file_name):\n try:\n with open(file_name, \"r\") as file:\n lines = file.readlines()\n\n table = [element.replace(\"\\n\", \"\").split(\"\\t\") for element in lines]\n\n nodes = list()\n for node in table:\n new_node = []\n for coordinate in node:\n new_node.append(float(coordinate))\n\n nodes.append(new_node)\n\n return nodes\n\n except FileNotFoundError as f:\n raise f from None\n except Exception as e:\n raise e from None", "def read(cls, inputfilename):\n\n # import json\n # with open(inputfilename, 'w') as infile:\n # data = json.load(infile)\n # g = nx.readwrite.json_graph.node_link_graph(data)\n # return cls(network = g)\n return cls(network=nx.read_gpickle(inputfilename))", "def htk2dag(self, file_path):\n field_re = re.compile(r'(\\S+)=(?:\"((?:[^\\\\\"]+|\\\\.)*)\"|(\\S+))')\n open_fn = gzip.open if file_path.endswith('.gz') else open\n with open_fn(file_path, 'rt', encoding='utf-8') as fh:\n self.header = {}\n self.nframes = 0\n state = 'header'\n # Read everything\n for spam in fh:\n if spam.startswith('#'):\n continue\n fields = dict(map(lambda t: (t[0], t[1] or t[2]),\n field_re.findall(spam.rstrip())))\n # Number of nodes and arcs\n if 'N' in fields:\n num_nodes = int(fields['N'])\n self.nodes = [None] * num_nodes\n num_arcs = int(fields['L'])\n self.arcs = [None] * num_arcs\n state = 'items'\n if state == 'header':\n self.header.update(fields)\n else:\n # This is a node\n if 'I' in fields:\n idx = int(fields['I'])\n frame = int(float(fields['t']) * FRATE)\n var = int(fields['v']) if 'v' in fields else None\n node = self.Node(\n fields['W'].replace('\\\\', ''), frame, var)\n self.nodes[idx] = node\n if frame > self.nframes:\n self.nframes = frame\n # This is an arc\n elif 'J' in fields:\n idx = int(fields['J'])\n start_node = self.nodes[int(fields['S'])]\n end_node = self.nodes[int(fields['E'])]\n ascr = float(fields.get('a', 0))\n lscr = float(fields.get('l', 0))\n nscr = fields.get('n', [])\n if isinstance(nscr, str):\n nscr = [float(n) for n in nscr.split(',')]\n iscr = fields.get('i', [])\n if isinstance(iscr, str):\n iscr = [float(i) for i in iscr.split(',')]\n arc = self.Arc(\n start_node, end_node, ascr, lscr, nscr, iscr)\n self.arcs[idx] = arc\n # Link up existing nodes\n start_node.exits.append(arc)\n end_node.entries.append(arc)\n\n self.sort_nodes()", "def readmesh(filename):\n f = open(filename, 'rb')\n cells = []\n edges = []\n\n # create each cell and edge, etc\n for line in f:\n \n return cells, edges", "def read_graph(settings):\n if settings.edgelist_input:\n graph = nx.read_edgelist(settings.input)\n else:\n edge_list = pd.read_csv(settings.input).values.tolist()\n 
graph = nx.from_edgelist(edge_list)\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def load_graph(self, filename):\n try:\n file_extention = list(filename.split(\".\"))[-1]\n if file_extention == \"gml\":\n self.graph = nx.read_gml(filename)\n if file_extention == \"adjlist\":\n self.graph = nx.read_adjlist(filename)\n if file_extention == \"yaml\":\n self.graph = nx.read_yaml(filename)\n except Exception as e:\n print(\"Error in loading Graph file: The error is\", e)", "def main(infilename):\n graph = pydot.graph_from_dot_file(infilename)\n nodes = get_nodes_info(graph)\n load_links(graph, nodes)\n propagate_lineage(nodes)\n dump_status(nodes)", "def dag2htk(self, file_path):\n open_fn = gzip.open if file_path.endswith('.gz') else open\n with open_fn(file_path, 'wb') as fh:\n for k, v in self.header.items():\n string = '%s=%s\\n' % (k, v)\n fh.write(string.encode())\n fh.write(('N=%d\\tL=%d\\n' % (\n self.num_nodes(), self.num_arcs())).encode())\n mapping = {}\n for idx, node in enumerate(self.nodes):\n if node.var:\n string = 'I=%d\\tt=%.2f\\tW=%s\\tv=%d\\n' % (\n idx, node.entry/FRATE, node.sym, node.var)\n else:\n string = 'I=%d\\tt=%.2f\\tW=%s\\n' % (\n idx, node.entry/FRATE, node.sym)\n fh.write(string.encode())\n mapping[node] = idx\n for idx, arc in enumerate(self.arcs):\n string = 'J=%d\\tS=%d\\tE=%d\\ta=%.2f\\tl=%.3f' % (\n idx,\n mapping[arc.src],\n mapping[arc.dest],\n arc.ascr,\n arc.lscr,\n )\n if arc.nscr:\n string += '\\tn=' + ','.join(\n ['{:.3f}'.format(n) for n in arc.nscr])\n if arc.iscr:\n string += '\\ti=' + ','.join(\n ['{:.3f}'.format(i) for i in arc.iscr])\n string += '\\n'\n fh.write(string.encode())", "def LoadFromPajek(filepath, getlabels=False):\n # 0) OPEN THE FILE AND READ THE SIZE OF THE NETWORK\n pajekfile = open(filepath, 'r')\n firstline = pajekfile.readline()\n firstline = firstline.split()\n N = int(firstline[1])\n\n # 1) READ THE LABELS OF THE NODES IF WANTED\n if getlabels:\n labels = []\n\n # Security check, make sure that labels of nodes are listed in file\n line = pajekfile.readline()\n if line.split()[0] != '1':\n pajekfile.seek(1)\n print('LoadFromPajek() warning: No labels found to read.')\n\n # If labels are in file continue reading the labels.\n else:\n # If labels are wrapped in between quotes\n try:\n idx1 = line.index('\"') + 1\n # Add the first label\n idx2 = line[idx1:].index('\"')\n label = line[idx1:idx1+idx2]\n labels.append(label)\n\n # And now read the labels for the rest of the nodes\n for i in range(1,N):\n line = pajekfile.readline()\n idx1 = line.index('\"') + 1\n idx2 = line[idx1:].index('\"')\n label = line[idx1:idx1+idx2]\n labels.append(label)\n\n # Otherwise, make a wild guess of what the label is\n except ValueError:\n # Add the first label\n label = line.split()[1]\n labels.append(label)\n\n # And now read the labels of the rest of the nodes\n for i in range(1,N):\n line = pajekfile.readline()\n label = line.split()[1]\n labels.append(label)\n\n # 2) READ THE LINKS AND CREATE THE ADJACENCY MATRIX\n # 2.1) Find out whether the network is directed or undirected\n # while loop to skip empty lines if needed or the lines of the labels\n done = False\n while not done:\n line = pajekfile.readline()\n if line[0] == '*':\n if 'Edges' in line:\n directed = False\n elif 'Arcs' in line:\n directed = True\n else:\n print('Could not find whether network is directed or undirected')\n break\n done = True\n\n # 2.2) Read the first line contining a link\n line = pajekfile.readline()\n line = line.split()\n\n # If link 
information is BINARY, just read the adjacency list links\n if len(line) == 2:\n # 2.3) Declare the adjacency matrix and include the first link\n adjmatrix = np.zeros((N,N), np.uint8)\n i = int(line[0]) - 1\n j = int(line[1]) - 1\n adjmatrix[i,j] = 1\n if not directed:\n adjmatrix[j,i] = 1\n\n # 2.4) Include the rest of the links\n for line in pajekfile:\n i, j = line.split()\n i = int(i) - 1\n j = int(j) - 1\n adjmatrix[i, j] = 1\n if not directed:\n adjmatrix[j, i] = 1\n\n # If the link information is WEIGHTED, read the weighted links\n elif len(line) == 3:\n # 2.3) Find whether link weights are integer or floating poing\n i, j, aij = line\n outdtype = np.int\n try:\n outdtype(aij)\n except ValueError:\n outdtype = np.float\n\n # 2.4) Declare the adjacency matrix and include the first link\n adjmatrix = np.zeros((N, N), outdtype)\n i = int(i) - 1\n j = int(j) - 1\n adjmatrix[i, j] = outdtype(aij)\n if not directed:\n adjmatrix[j, i] = outdtype(aij)\n\n # 2.5) Read the rest of the file and fill-in the adjacency matrix\n for line in pajekfile:\n i, j, aij = line.split()\n i = int(i) - 1\n j = int(j) - 1\n adjmatrix[i, j] = outdtype(aij)\n if not directed:\n adjmatrix[j, i] = adjmatrix[i, j]\n\n # 3) CLOSE FILE AND RETURN RESULTS\n pajekfile.close()\n\n if getlabels:\n return adjmatrix, labels\n else:\n return adjmatrix", "def read_dot_file(dot_file_path):\n nodes = []\n edges = []\n with open(dot_file_path) as f:\n in_lines = f.readlines()\n for line in in_lines:\n # ignore arrow attributes\n line = line.split(sep=\"[\")[0]\n if \"->\" in line:\n split_list = line.split(sep=\"->\")\n # print(\"ffgg\", split_list)\n pa = split_list[0].strip()\n if pa not in nodes:\n nodes.append(pa)\n ch_list = split_list[1].split(\",\")\n ch_list = [x.strip().strip(\";\").strip() for x in ch_list]\n # print(\"ffgg\", pa)\n # print(\"ffgg\", ch_list)\n for ch in ch_list:\n edges.append((pa, ch))\n if ch not in nodes:\n nodes.append(ch)\n\n return nodes, edges", "def create_social_graph(file):\n social_graph = NonDirectionalGraph(\"SocialGraph\")\n with open(file, \"rt\") as f:\n data = f.readlines()\n n_friendship = 0 # Represents the number of friendships in the graph in each iteration\n highest_n_friendship = 0 # Captures the highest record of n_friendship in the graph\n highest_n_neighbors_per_node_dict = {} # Captures the highest record of friendship per node\n for line in data:\n split_line = line.split()\n if \"became\" in split_line: # \"became\" is in lines where persons become connected\n for name in [split_line[0], split_line[2]]:\n # The following if statement makes sure to instantiate the node and adds it to the graph\n if name not in social_graph:\n node = Node(name)\n social_graph.add_node(node)\n highest_n_neighbors_per_node_dict[name] = 0 ##\n social_graph.add_edge(split_line[0],split_line[2]) # Adds a connection between the nodes\n n_friendship += 1 # Updates the number of friendships\n # The following for loop updates the highest number of friends (neighbors) if it changes\n for name in [split_line[0], split_line[2]]:\n if len(social_graph.nodes[name].neighbors) > highest_n_neighbors_per_node_dict[name]:\n highest_n_neighbors_per_node_dict[name] = len(social_graph.nodes[name].neighbors)\n elif \"cancelled\" in split_line: # \"became\" is in lines where persons become disconnected\n social_graph.remove_edge(split_line[0], split_line[2])\n n_friendship -= 1 # Updates the number of friendships\n # In case any of the words \"cancelled\" or \"became\" is in the line\n else:\n 
print(\"Unrecognized line\")\n # The following for loop updates the highest number of friendship if it changes\n if n_friendship > highest_n_friendship:\n highest_n_friendship = n_friendship\n return social_graph, highest_n_friendship, highest_n_neighbors_per_node_dict", "def load_file(filename):\n # Create matrix from csv lines\n with open(filename) as f:\n m = [list(map(int, line.split(','))) for line in f]\n # Create digraph from matrix\n graph = utils.graph.DiGraph()\n ROWS = len(m)\n COLS = len(m[0])\n for r in range(ROWS):\n for c in range(COLS):\n u = (r, c)\n # Add add to node to the right\n if c+1 < COLS:\n v = (r, c+1)\n weight = m[r][c+1]\n graph.add_edge(u, v, weight)\n # Add add to node below\n if r+1 < ROWS:\n v = (r+1, c)\n weight = m[r+1][c]\n graph.add_edge(u, v, weight)\n # Add add to node above\n if 0 <= r-1:\n v = (r-1, c)\n weight = m[r-1][c]\n graph.add_edge(u, v, weight)\n # also add a start element and create edges to first column\n start_node = 'START'\n for row in range(ROWS):\n node = (row, 0)\n weight = m[row][0]\n graph.add_edge(start_node, node, weight)\n # also add an end element and create edges to the list column\n end_node = 'END'\n c = COLS-1\n for row in range(ROWS):\n node = (row, c)\n weight = 0 # Valid?\n graph.add_edge(node, end_node, weight)\n return graph, start_node, end_node", "def read_gro(filename):\n top = Topology()\n\n with open(filename, \"r\") as gro_file:\n top.name = str(gro_file.readline().strip())\n n_atoms = int(gro_file.readline())\n coords = u.nm * np.zeros(shape=(n_atoms, 3))\n for row, _ in enumerate(coords):\n line = gro_file.readline()\n content = line.split()\n if not line:\n msg = (\n \"Incorrect number of lines in .gro file. Based on the \"\n \"number in the second line of the file, {} rows of\"\n \"atoms were expected, but at least one fewer was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n res = content[0]\n atom_name = content[1]\n atom_id = content[2]\n coords[row] = u.nm * np.array(\n [\n float(content[3]),\n float(content[4]),\n float(content[5]),\n ]\n )\n site = Atom(name=atom_name, position=coords[row])\n\n r = re.compile(\"([0-9]+)([a-zA-Z]+)\")\n m = r.match(res)\n site.molecule = (m.group(2), int(m.group(1)))\n site.residue = (m.group(2), int(m.group(1)))\n top.add_site(site, update_types=False)\n top.update_topology()\n\n # Box information\n line = gro_file.readline().split()\n top.box = Box(u.nm * np.array([float(val) for val in line[:3]]))\n\n # Verify we have read the last line by ensuring the next line in blank\n line = gro_file.readline()\n if line:\n msg = (\n \"Incorrect number of lines in input file. 
Based on the \"\n \"number in the second line of the file, {} rows of atoms \"\n \"were expected, but at least one more was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n return top", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def readMovieData(filename):\r\n graph = Graph()\r\n with open(filename, \"r\", encoding=\"latin-1\") as ins:\r\n array = []\r\n delimiter = '/'\r\n for line in ins:\r\n names = line.split(delimiter)\r\n array.append(names)\r\n for i in range(1, len(names)):\r\n graph.addEdge(names[0], names[i])\r\n return graph", "def read_file(self) -> None:\n if not os.path.exists(self.location) or not os.path.isfile(\n self.location\n ):\n raise FileNotFoundError(self.location)\n\n self.graph = rdflib.Graph()\n try:\n if self.file_type is None:\n self.graph.parse(\n self.location, format=self.location.split(\".\")[-1]\n )\n else:\n self.graph.parse(self.location, format=self.file_type)\n except Exception:\n self.graph.parse(self.location)\n\n for (s, p, o) in self.graph:\n if p not in self.label_predicates:\n s_v = Vertex(str(s))\n o_v = Vertex(str(o))\n p_v = Vertex(str(p), predicate=True, vprev=s_v, vnext=o_v)\n self.add_vertex(s_v)\n self.add_vertex(p_v)\n self.add_vertex(o_v)\n self.add_edge(s_v, p_v)\n self.add_edge(p_v, o_v)", "def trace_to_graph(infile):\n assert '.tr' in infile\n outfile = infile.replace('.tr', '.graph')\n\n infile = open(infile)\n outfile = open(outfile, 'w')\n\n for line in infile:\n if line.startswith('module:'):\n tokens = line.split(' ')\n module_name = tokens[1]\n outfile.write('module: %s' % module_name)\n continue\n elif line.startswith('num_nodes:'):\n tokens = line.split(' ')\n num_nodes = int(tokens[1])\n outfile.write('%s\\n' % num_nodes)\n continue\n elif not line.startswith('ID:'): # skip the header lines\n continue\n elif 'DST:' not in line: # skip single qubit gates\n continue\n\n tokens = line.split(' ')\n assert tokens[4].startswith('SRC:')\n assert tokens[6].startswith('DST:')\n src = int(tokens[5])\n dst = int(tokens[7])\n assert src < num_nodes\n assert dst < num_nodes\n outfile.write('%s %s\\n' % (src, dst))", "def read_graph(graphclass, f: IO[str]) -> Tuple[Graph, List[str], bool]:\n options = []\n\n while True:\n try:\n line = read_line(f)\n n = int(line)\n graph = graphclass(directed=False, n=n)\n break\n except ValueError:\n if len(line) > 0 and line[-1] == '\\n':\n options.append(line[:-1])\n else:\n options.append(line)\n\n line = read_line(f)\n edges = []\n\n try:\n while True:\n comma = line.find(',')\n if ':' in line:\n colon = line.find(':')\n edges.append((int(line[:comma]), int(line[comma + 1:colon]), int(line[colon + 1:])))\n else:\n edges.append((int(line[:comma]), int(line[comma + 1:]), None))\n line = read_line(f)\n except Exception:\n pass\n\n indexed_nodes = list(graph.vertices)\n\n for edge in edges:\n graph += Edge(indexed_nodes[edge[0]], indexed_nodes[edge[1]], edge[2])\n\n if line != '' and line[0] == '-':\n return graph, options, True\n else:\n return graph, options, False", "def read_edges(f=sys.stdin):\n edges = []\n k = ['first', 'last', 'capacity', 'flow', 'used']\n lines = f.readlines()\n for line in lines:\n v = [int(s) for s in line.split(\" \")] + [0, False]\n edges.append(dict(zip(k,v)))\n \n\n return edges", "def read_from_file(self, filename):\n with open(filename, 'r') as f:\n for line in f.read().splitlines():\n name, neighbours, r_table = line.split('!')\n\n self.add_new(name)\n if neighbours:\n for neighbour in neighbours.split(';'):\n try:\n 
self.add_neighbours(name, neighbour)\n except Exception as e:\n\n pass\n if r_table:\n for network in r_table.split(';'):\n net_name, distance = network.split(':')\n\n distance = int(distance)\n self.add_network(name, net_name, distance)", "def load_network(adj_network):\n fin = open(adj_network, \"r\")\n graph = collections.defaultdict(list)\n for line in tqdm(fin):\n line = line.replace(\"\\n\", \"\")\n args = line.split(\"\\t\")\n node = args[0].replace(\"(\", \"\")\n node = node.replace(\")\", \"\")\n node = int(node)\n assert node not in graph\n graph[node] = list(map(float, args[1:]))\n return graph", "def load_edgl(fname):\n # Reads edges\n df = pd.read_csv(fname, sep=\" \", header=None, usecols=[0, 1])\n # Convert to list of tuples\n return list(df.itertuples(index=False, name=None))", "def _read_netgen(self, file):\n\n with open(file, 'r') as f:\n\n aid = 0 # current arc ID\n\n # Read the file line-by-line\n for line in f:\n\n # Decide what to do based on the line prefix\n\n # Comment line\n if line[0] == 'c':\n # Skip\n continue\n\n # Problem info\n elif line[0] == 'p':\n # p sense #nodes #arcs #int int_type #defenses #attacks\n # We always assume that the sense is minimization\n\n ls = line.split()\n if ls[5] == 'n':\n self.parent_type = 0\n self.def_limit = int(ls[6])\n self.att_limit = int(ls[7])\n\n # Initialize all nodes as transshipment (in case the NETGEN\n # file lists only nonzero supply values)\n self.nodes = [_Node(i, 0.0) for i in range(int(ls[2]))]\n\n # Node\n elif line[0] == 'n':\n # n ID supply\n\n # All nodes have already been defined, so update existing\n # supply values\n\n ls = line.split()\n self.nodes[int(ls[1])-1].supply = float(ls[2])\n\n # Arc\n elif line[0] == 'a':\n # a tail head LB UB cost\n\n ls = line.split()\n tail = self.nodes[int(ls[1])-1]\n head = self.nodes[int(ls[2])-1]\n if (int(ls[2]) == 0) and (self.parent_type == 0):\n head = None\n\n self.arcs.append(_Arc(aid, tail, head, float(ls[4]),\n float(ls[5])))\n aid += 1\n\n # Interdependency\n elif line[0] == 'i':\n # i parent child\n\n ### We assume for now that arcs are parents.\n\n ls = line.split()\n self.int.append((self.arcs[int(ls[1])-1],\n self.arcs[int(ls[2])-1]))\n\n # Defensible arc\n elif line[0] == 'd':\n # d arc\n\n ls = line.split()\n self.def_arcs.append(self.arcs[int(ls[1])-1])\n\n # All defensible arcs are assumed to be destructible\n self.att_arcs.append(self.arcs[int(ls[1])-1])\n\n # Destructible arc\n elif line[0] == 'r':\n # r arc\n\n ls = line.split()\n self.att_arcs.append(self.arcs[int(ls[1])-1])\n\n # If no defensible or destructible arcs were listed, we assume that\n # all arcs are available\n\n if len(self.def_arcs) == 0:\n self.def_arcs[:] = self.arcs[:]\n\n if len(self.att_arcs) == 0:\n self.att_arcs[:] = self.def_arcs[:]", "def import_graph(cls, filename, node_cls=GraphNode):\n with open(filename, 'r') as file:\n num_nodes = None\n graph = {}\n for line in file:\n if num_nodes is None:\n num_nodes = int(line)\n graph = {id_: node_cls(id_) for id_ in range(1, num_nodes + 1)}\n else:\n m, n, dist = line.split(' ')\n m = int(m)\n n = int(n)\n dist = float(dist)\n graph[m].neighbours[n] = graph[n]\n graph[n].neighbours[m] = graph[m]\n graph[m].distances[n] = dist\n graph[n].distances[m] = dist\n return graph", "def read_file(network_filename, user_by_city_filename=None):\n graph = read_dictlist_from_file(network_filename)\n\n gg = Graph(directed=False) # new Graph object\n\n user_id_map = {} # storing new id info\n new_id = 0\n for user_id in graph:\n temp_users = []\n 
temp_users.append(user_id)\n for friend in graph[user_id]:\n temp_users.append(friend)\n for id1 in temp_users:\n if id1 not in user_id_map:\n user_id_map[id1] = new_id\n gg.add_vertex() # index for this vertex will be new_id\n new_id += 1\n if id1 > user_id:\n gg.add_edge(gg.vertex(user_id_map[user_id]),\n gg.vertex(user_id_map[id1]))\n print \"Done reading the graph.\"\n if user_by_city_filename is None:\n return (gg, None)\n if user_by_city_filename is not None:\n cities = read_dict_from_file(user_by_city_filename)\n # Adding vertex property as city\n city_prop = gg.new_vertex_property(\"int\")\n for user_id in cities:\n city_prop[gg.vertex(user_id_map[user_id])] = cities[user_id]\n print \"Done reading the city.\"\n return (gg, city_prop)", "def get_graph(filename, data_folder):\n g = nx.MultiGraph()\n with open(data_folder + \"/\" + filename) as fp:\n line = fp.readline()\n while line:\n (o, d, t, e) = line.split()\n g.add_edge(int(o), int(d), start=int(t), duration=int(e))\n line = fp.readline()\n return g", "def ReadGraph(inputFileName):\n inputFile = open(inputFileName)\n jsonGraphArray = json.load(inputFile)\n graph = Graph.Graph()\n graph.load_from_json(jsonGraphArray)\n inputFile.close()\n return graph", "def assemble(filename):\n count = 0\n index = { } # Keys are names, values are graph indices.\n with open(filename, 'r') as f:\n size = int(f.readline().strip())\n G = Graph(size)\n for line in f:\n words = line.split()\n s = index.get(words[0], None)\n d = index.get(words[1], None)\n if s == None: # Here we are\n s = index[words[0]] = count # making sure\n count += 1 # we use index\n if d == None: # that is not\n d = index[words[1]] = count # yet in the\n count += 1 # graph.\n w = int(words[2])\n G.add(s,d,w)\n return G,index", "def load_graph(input_file=None, input_list=None):\n G = nx.Graph()\n if input_file:\n with open(input_file, 'r') as file:\n for line in file.readlines():\n line = line.strip().split(\" \")\n G.add_edge(line[0], line[1])\n elif input_list:\n G.add_edges_from(input_list)\n return G", "def load_graph(graph_path):\n graph = nx.from_edgelist(pd.read_csv(graph_path).values.tolist())\n graph.remove_edges_from(graph.selfloop_edges())\n return graph", "def read_pslg(file_name):\n with open(file_name, \"r\") as fp:\n components = fp.read().split(\"component: \")[1:]\n \n lpoints, lconnect_left, lconnect_right = [], [], []\n index = 0\n \n for component in components:\n raw_points = component.split(\"\\n\")[1:-1]\n points = [list(map(float, line.split()[1:3])) for line in raw_points]\n connect_left = np.roll(np.arange(index, index + len(raw_points), 1), -1)\n connect_right = np.roll(connect_left, 2)\n \n lpoints = lpoints + points\n lconnect_left.append(connect_left)\n lconnect_right.append(connect_right)\n \n index += len(raw_points)\n \n return np.array(lpoints), np.hstack(lconnect_left).astype(int), np.hstack(lconnect_right).astype(int)", "def get_imc_topo(topo_file):\n topo_graph = nx.Graph()\n with open(topo_file, 'r') as f:\n for line in f.readlines():\n if (len(line) > 10) and (line[0] != '#'):\n split_data = line.split()\n source = split_data[0]\n dest = split_data[2]\n #capacity = 1000 # We are fixing this to one.\n capacity = get_imc_capacity(split_data[1], split_data[3])\n if not topo_graph.has_edge(source, dest):\n topo_graph.add_edge(source, dest, capacity = capacity)\n # Checks graph for any componnets and returns the largest one.\n topo_graph = validate_graph(topo_graph)\n f.close()\n return topo_graph", "def _read_network_file(in_name, 
in_format=\"\", directed=False):\n\n if in_format == 'edges':\n if directed:\n g = nx.read_edgelist(in_name, create_using=nx.DiGraph())\n else:\n g = nx.read_edgelist(in_name, data=False)\n elif in_format == 'gefx':\n g = nx.read_gexf(in_name)\n elif in_format == 'gml':\n g = nx.read_gml(in_name)\n elif in_format == 'graphML' or in_format == 'graphml':\n g = nx.read_graphml(in_name)\n nodesInfo = g.nodes(data=True)\n if len(nx.get_node_attributes(g,\"label\"))>0:\n node2Label = {nodeid: data[\"label\"].replace(\" \",\"_\") for (nodeid, data) in nodesInfo}\n g = nx.relabel_nodes(g, node2Label, copy=False)\n elif in_format == 'pajek':\n g = nx.read_pajek(in_name)\n elif in_format == 'ncol':\n g = nx.read_edgelist(in_name)\n else:\n raise Exception(\"UNKNOWN FORMAT \" + in_format)\n return g", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numnod = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.nodes = []\n\n for _ in range(self.numnod):\n node = FRDNode()\n self.nodes.append(node)\n if self.format < 2:\n in_file.read(1)\n node.key = int(in_file.read(2))\n node.number = int(in_file.read(5*(self.format+1)))\n node.pos = [float(in_file.read(12)) for j in range(3)]\n in_file.read(1) # eol\n else:\n node.number = struct.unpack('i', in_file.read(4))[0]\n if self.format == 2:\n node.pos = struct.unpack('fff', in_file.read(12))\n else:\n node.pos = struct.unpack('ddd', in_file.read(24))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def depth_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n height = np.fromfile(f,dtype=np.int32,count=1)[0]\n size = width*height\n assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)\n depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width))\n return depth", "def get_topology(file):\n with open(file, 'r') as stream:\n try:\n topo_dict = yaml.load(stream)\n return topo_dict\n except (yaml.YAMLError, KeyError):\n return [] # TODO: give user feedback", "def treefile(filename):\n nobv.visual_treefile(filename)", "def nodes_from_ascii(basename, in_nodes=False):\n ascii_path = Path(\"/data/graphs\") / basename / (\"ascii.graph-txt\")\n assert ascii_path.exists(), \"Graph not found!\"\n with ascii_path.open() as f:\n line = f.readline()\n V = int(line.split()[0])\n print(\"{} vertices\".format(V))\n print(\"reading..\")\n nodes = [list() for i in range(V)]\n singleton = 0\n for i in trange(V):\n line = f.readline()\n if line[0] == \"\\n\":\n singleton += 1\n else:\n if in_nodes:\n for node in line.split():\n nodes[int(node)].append(i)\n else:\n nodes[i] = [int(j) for j in line.split()]\n print(\"Found {} singleton nodes\".format(singleton))\n return nodes", "def read_pot(filename, subset=None) -> List[Graph]:\n with open(filename, 'r') as f:\n # Maps <tuple of variable indices> -> Factor, so we can quickly determine whether to add the line to an existing\n # factor or create a new factor of ones (there should only be one factor per set of variable indices per graph).\n\n # TODO if the factor doesn't appear at all, it is never created. 
Do we need to create factors of all ones?\n\n factors = {}\n largest_var = None\n n = 0\n for i, line in enumerate(f):\n if not line.strip():\n # If we see a blank line, this indicates the end of this graph, so yield it and then start building\n # the next graph.\n if factors.values():\n if not subset or n in subset:\n yield Graph(factors.values())\n if n == sorted(subset)[-1]:\n return\n n += 1\n factors = {}\n largest_var = None\n continue\n\n try:\n fields = line.split()\n if largest_var is not None:\n assert int(fields[0]) >= largest_var, \"Variables should be listed in increasing order\"\n\n largest_var = int(fields[0])\n values = fields[1:-1]\n log_p = float(fields[-1])\n\n vars = tuple(range(largest_var - len(values) + 1, largest_var + 1))\n indices = tuple([LETTERS.index(v) for v in values])\n\n if vars in factors:\n factors[vars].table[indices] = np.exp(log_p)\n else:\n table = np.ones((len(LETTERS),) * len(vars))\n table[indices] = np.exp(log_p)\n factors[vars] = Factor(vars, table)\n except:\n print(\"Unexpected line format '{}', assuming end of file\".format(line))\n break", "def flow_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n height = np.fromfile(f,dtype=np.int32,count=1)[0]\n size = width*height\n assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)\n tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2))\n u = tmp[:,np.arange(width)*2]\n v = tmp[:,np.arange(width)*2 + 1]\n return u,v", "def flow_read(filename):\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n height = np.fromfile(f,dtype=np.int32,count=1)[0]\n size = width*height\n assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)\n tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2))\n u = tmp[:,np.arange(width)*2]\n v = tmp[:,np.arange(width)*2 + 1]\n return u,v", "def load_map(map_filename):\n\n data_file = open(map_filename, 'r')\n map_graph = Digraph()\n for line in data_file:\n source_node, dest_node, total_distance, outdoor_distance = line.split(' ')\n source = Node(source_node)\n dest = Node(dest_node)\n edge = WeightedEdge(source, dest, int(total_distance), int(outdoor_distance))\n if not map_graph.has_node(source): # checking if the node is already added to graph.\n map_graph.add_node(source)\n if not map_graph.has_node(dest): # checking if the node is already added to graph.\n map_graph.add_node(dest)\n map_graph.add_edge(edge)\n print(\"Loading map from file...\")\n return map_graph", "def flow_read(filename):\n TAG_FLOAT = 202021.25\n\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, \\\n ' flow_read:: Wrong tag in flow file ' +\\\n '(should be: {0}, is: {1}). 
Big-endian machine?'.format(TAG_FLOAT,check) +\\\n ' | Filename: {}'.format(filename)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n height = np.fromfile(f,dtype=np.int32,count=1)[0]\n size = width*height\n assert width > 0 and height > 0 and size > 1 and size < 100000000, \\\n ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)\n\n tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2))\n u = tmp[:,np.arange(width)*2]\n v = tmp[:,np.arange(width)*2 + 1]\n return np.dstack((u,v))", "def readAdjacencyGraph(self,filename):\n try:\n for line in open(filename,'r'):\n incoming,outgoing=line.strip().split(\":\")\n no_outgoing=outgoing.split(\",\")\n self.adjacencyMetadata[incoming]=dict(zip(no_outgoing,range(len(no_outgoing))))\n if incoming not in self.adjacency.keys():\n self.adjacency[incoming]=None\n for item in no_outgoing:\n if item not in self.adjacency.keys():\n self.adjacency[item]=None\n except Exception as e:\n raise", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def load_graph(filename):\n with tf.gfile.FastGFile(filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')", "def init_from_file(filename, parser=int):\n filename = filename + \".\" + str(PID)\n\n def __parser_couple(s):\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n ss = s.split(\",\")\n return int(ss[0]), int(ss[1])\n\n p = PTree()\n content = SList([])\n with open(filename, \"r\") as f:\n count_line = 0\n for line in f:\n if line.strip()[0] == '#':\n continue\n # __distribution: PID -> nb of segments\n # __global_index: num seg -> (start, offset)\n if count_line == 0: # Get the distribution\n p.distribution = SList.from_str(line)\n p.start_index = p.distribution.scanl(lambda x, y: x + y, 0)[PID]\n p.nb_segs = p.distribution[PID]\n elif count_line == 1: # Get the global_index\n p.global_index = SList.from_str(line, parser=__parser_couple)\n else: # Get the content\n content.extend(Segment.from_str(line, parser=parser))\n count_line = count_line + 1\n p.content = content\n return p", "def graph(graph_file):\n return render_template(graph_file)", "def load_stack(filename):\n data = np.genfromtxt(filename, skip_header=1)\n index_arr = data[:, 2]\n thickness_arr = data[:, 3] / 1e9\n stack = Stack(index_arr, thickness_arr)\n return stack", "def parse_problem(path_to_file):\n with open(path_to_file, 'r') as f:\n lines = f.readlines()\n return parse_problem_lines(lines)", "def file_reader(filename = 'conv_params'):\n\n with open(filename) as f:\n info = f.readlines()\n info = [i.strip() for i in info] # each element in info is a string of a line from the file\n info = [i.split() for i in info] # split each whitespace delimited element into a list of lists\n info = [[i.split('-') for i in j] for j in info] # note info is 3 layers deep\n\n info[2] = info[2][0] # makes default E just a single string of the number\n info[3] = info[3][0]\n\n return info", "def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = 
np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)", "def parse_newick_file(filename: str, digraph=True):\n tree = newick.read(filename)\n\n if digraph:\n graph_newick = nx.DiGraph()\n else:\n graph_newick = nx.Graph\n\n none_counter = 1\n\n # Adding root node\n graph_newick.add_node(tree[0], child_position=0)\n\n while tree:\n tree_node = tree[0]\n tree_node, none_counter = rename_none_node(tree_node, none_counter)\n graph_newick, descendants, none_counter = add_newick_node_and_edge(graph_newick, tree_node, none_counter)\n tree += descendants\n tree.remove(tree_node)\n\n return graph_newick", "def read_graph_file(path):\n\n node_graph = graph_pb2.Scene()\n\n if not os.path.exists(path):\n getLog().error(\"Missing graph file: {}\".format(path))\n return\n\n with open(path, \"rb\") as read_file:\n node_graph.ParseFromString(read_file.read())\n\n return node_graph", "def load_graph(f: IO[str], graph_class=Graph) -> list[Graph]:\n return read_graph_list(graph_class, f)", "def load_velodyne_points(filename):\n points = np.fromfile(filename, dtype=np.float32).reshape(-1, 4)\n points[:, 3] = 1.0 # homogeneous\n return points", "def load_graph(path):\r\n \r\n # initialise dictionary\r\n graph_dict = {}\r\n \r\n # open File for reading\r\n graph_file = open(path,'r')\r\n \r\n # initialise list of all the lines\r\n graph_lines = graph_file.readlines() \r\n print (\"Loaded graph has\", len(graph_lines), \"nodes\")\r\n # For all lines get node and edges and put them to SET\r\n for line in graph_lines:\r\n line = line.rstrip('\\n')\r\n neighbors = line.split(' ')\r\n #node = int(neighbors[0])\r\n node = neighbors[0]\r\n graph_dict[node] = set([])\r\n for neighbor in neighbors[1:]:\r\n #graph_dict[node].add(int(neighbor))\r\n graph_dict[node].add(neighbor)\r\n return graph_dict", "def read_graph(args):\n dataset = pd.read_csv(args.features_path).values.tolist()\n edges = {}\n edges[\"positive_edges\"] = [edge[0:2] for edge in dataset if edge[2] == 1]\n edges[\"negative_edges\"] = [edge[0:2] for edge in dataset if edge[2] == -1]\n edges[\"ecount\"] = len(dataset)\n edges[\"ncount\"] = len(set([edge[0] for edge in dataset]+[edge[1] for edge in dataset]))\n return edges", "def graph(self, file_data):\n\n self.file_data = file_data\n self.features, self.headers = get_features(file_data)\n\n # FIXME make it so that the outliers can be visualized as well\n self.new_scaled_features, self.features = manipulate_features(self.features, file_data)\n # features = scaler.inverse_transform(new_scaled_features)\n\n if self.show_outliers:\n self.new_scaled_features, self.outliers, self.outlier_detector = find_and_remove_outliers(\n self.new_scaled_features)\n\n if self.manually_find_remove_outliers:\n selector = remove_outliers(self.new_scaled_features)\n\n self.new_scaled_features = self.new_scaled_features[selector.indexes]\n self.features = self.features[selector.indexes]\n\n self.labels = self.clf.predict(self.new_scaled_features)\n self.color_labels = list(map(lambda x: 'r' if x == 0 else 'b', self.labels))", "def get_gml_data(file_path):\n\n bbox = (2.34592e7,100+6.704e6,2.34603e7,700+6.704e6)\n return gpd.read_file(file_path, bbox=bbox)" ]
[ "0.7323825", "0.6937586", "0.68380326", "0.67994225", "0.67790306", "0.6770359", "0.6753334", "0.669916", "0.6612476", "0.65449065", "0.6532908", "0.6516059", "0.64703196", "0.64033914", "0.63891566", "0.6366815", "0.63625896", "0.634472", "0.6317197", "0.6311096", "0.62962216", "0.6286445", "0.6216713", "0.6215092", "0.6173808", "0.61655265", "0.6153044", "0.614722", "0.61371505", "0.5982137", "0.5981102", "0.5968785", "0.5948722", "0.5943619", "0.5910418", "0.5857068", "0.58464706", "0.5843319", "0.5834283", "0.5791879", "0.5786727", "0.57754725", "0.5749816", "0.5741576", "0.57318574", "0.57290566", "0.572219", "0.5716084", "0.5706609", "0.5681702", "0.56616175", "0.56403387", "0.56371725", "0.5626942", "0.56240535", "0.5612376", "0.55964655", "0.554175", "0.5541632", "0.553931", "0.5538787", "0.55385", "0.5512732", "0.55106497", "0.5505497", "0.5498722", "0.54949623", "0.5494881", "0.5490971", "0.54873633", "0.5485108", "0.54595274", "0.5438225", "0.54305196", "0.54189223", "0.5416715", "0.54149514", "0.5396342", "0.5396104", "0.53928334", "0.53928334", "0.5391913", "0.5382356", "0.538175", "0.5381095", "0.5381095", "0.5376285", "0.53747743", "0.5368614", "0.536685", "0.5365132", "0.53625625", "0.53575045", "0.5356035", "0.53504425", "0.53496504", "0.53437823", "0.5331091", "0.5326578", "0.53245866" ]
0.7031254
1
Standard backtracking approach to find the optimal op-mesh assignment, starting from the optimal number of stages (best_n_stages). The return value is a list [((layer_start, next_layer_start), submesh_shape_idx, sharding_config_idx)] where (layer_start, next_layer_start) is a half-open [) slice of the ops and submesh_shape_idx is the submesh those ops should be mapped to (sharding_config_idx is currently always 1 but will eventually be used to pick the optimal tensor sharding configuration).
def get_optimal_submesh_assignments( best_n_stages, F_argmin, n_devices, n_ops, submesh_sizes ): current_s = best_n_stages current_layer = 0 current_devices = n_devices optimal_layer_submesh_assignments = [] while current_s > 0 and current_layer < n_ops and current_devices > 0: next_start_layer, submesh_shape_idx, sharding_config_idx = F_argmin[ current_s, current_layer, current_devices ] assert next_start_layer != -1 and current_devices != -1 optimal_layer_submesh_assignments.append( ((current_layer, next_start_layer), submesh_shape_idx, sharding_config_idx) ) current_s -= 1 current_layer = next_start_layer current_devices -= submesh_sizes[submesh_shape_idx] assert current_s == 0 and current_layer == n_ops and current_devices == 0 return optimal_layer_submesh_assignments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def inter_op_dp(\n n_layers: int,\n n_devices: int,\n n_microbatches: int,\n submesh_shapes: List[Tuple[int, int]],\n intra_compute_costs,\n max_n_succ_stages,\n):\n min_cost = np.inf\n best_solution = None\n prev_intra_cost = 0.0\n gap = 1e-6\n\n submesh_sizes: list = NumbaList()\n for n, m in submesh_shapes:\n submesh_sizes.append(n * m)\n\n for intra_cost in np.sort(np.unique(intra_compute_costs)):\n if intra_cost - prev_intra_cost < gap:\n continue\n if intra_cost * n_microbatches >= min_cost:\n break\n\n # Optimization that lifts a check for stage_cost <= t_max_stage_cost\n # out of the inner dp loop (see alpa/~/stage_construction.py#L121).\n # This yields a ~100-200x improvement over the baseline implementation.\n valid_cost_idxs = np.transpose((intra_compute_costs <= intra_cost).nonzero())\n # This corresponds to the i of k <= i <= K from eqn. 
3 in the alpa paper.\n valid_cost_idxs = valid_cost_idxs[\n valid_cost_idxs[:, 0] <= valid_cost_idxs[:, 1]\n ]\n valid_costs = intra_compute_costs[tuple(valid_cost_idxs.T)]\n valid_idxs_costs = np.hstack([valid_cost_idxs, valid_costs[:, np.newaxis]])\n\n F, F_stage_max, F_argmin = inter_op_dp_inner_loop(\n n_layers,\n n_devices,\n submesh_sizes,\n valid_idxs_costs,\n max_n_succ_stages,\n )\n\n best_n_stages = F[:, 0, n_devices].argmin()\n all_stages_cost = F[best_n_stages, 0, n_devices]\n slowest_stage_cost = F_stage_max[best_n_stages, 0, n_devices]\n if np.isinf(all_stages_cost):\n continue\n slowest_stage_total_cost = (n_microbatches - 1) * slowest_stage_cost\n\n if all_stages_cost + slowest_stage_total_cost < min_cost:\n min_cost = all_stages_cost + slowest_stage_total_cost\n best_solution = best_n_stages, F_argmin\n prev_intra_cost = intra_cost\n\n assert best_solution is not None\n best_n_stages, F_argmin = best_solution\n optimal_layer_submesh_assignments = get_optimal_submesh_assignments(\n best_n_stages, F_argmin, n_devices, n_layers, submesh_sizes\n )\n return optimal_layer_submesh_assignments", "def flax_shard_checkpoint(params, max_shard_size=\"10GB\"):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n\n sharded_state_dicts = []\n current_block = {}\n current_block_size = 0\n total_size = 0\n\n # flatten the weights to chunk\n weights = flatten_dict(params, sep=\"/\")\n for item in weights:\n weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)\n\n # If this weight is going to tip up over the maximal size, we split.\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = {}\n current_block_size = 0\n\n current_block[item] = weights[item]\n current_block_size += weight_size\n total_size += weight_size\n\n # Add the last block\n sharded_state_dicts.append(current_block)\n\n # If we only have one shard, we return it\n if len(sharded_state_dicts) == 1:\n return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None\n\n # Otherwise, let's build the index\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = FLAX_WEIGHTS_NAME.replace(\".msgpack\", f\"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack\")\n shards[shard_file] = shard\n for weight_name in shard.keys():\n weight_map[weight_name] = shard_file\n\n # Add the metadata\n metadata = {\"total_size\": total_size}\n index = {\"metadata\": metadata, \"weight_map\": weight_map}\n return shards, index", "def dfs_maximizing(state) :\n #print state.describe_previous_move()\n global state_evals, path, _path, _score, level, _state;\n\n level+=1\n path.append(state)\n for stt in state.generate_next_states():\n score=0\n agenda.append((stt, level))\n \n if stt.is_game_over():\n state_evals+=1\n score=stt.get_endgame_score()\n if score>_score:\n _score=score\n _path = path[0:]\n _state = stt\n if not agenda:\n\n _path.append(_state)\n return [_path, _score, state_evals];\n else:\n new_state, level=agenda.pop()\n path=path[0:level]\n level-=1\n return dfs_maximizing(new_state)", "def get_optimal_patches(self):\n self.optimal_patch_centers = list()\n # Backtrace through cost to determine optimal samples\n for i in range(self.cost_matrix.shape[0] - 1, -1, -1):\n idx = self.nodes_min_energy_index(i)\n node = self.min_energy_index[i][idx]\n self.optimal_patch_centers.append(node)\n self.optimal_patch_centers.reverse()\n self.optimal_patch_centers = [\n int(patch) for patch in self.optimal_patch_centers if 
np.isfinite(patch)\n ]\n optimal_patch_centers = list()\n for patch_center in self.optimal_patch_centers:\n if (\n self.source_patches[self.patch_centers[patch_center]].size\n != self.patch_size * self.patch_size * 3\n ):\n node = patch_center - 1 if patch_center > 1 else patch_center + 1\n optimal_patch_centers.append(node)\n if optimal_patch_centers:\n self.optimal_patch_centers = optimal_patch_centers", "def minimax_endgame_search(state, maximize=True) :\n global depth;\n depth=0\n path=[]\n paths=[]\n _path, _score = get_minimax_score(state, maximize, path, paths,INF,always_zero)\n\n return [_path, _score, len(paths)]", "def get_floorplan(\n graph: DataflowGraph,\n slot_manager: SlotManager,\n grouping_constraints_in_str: List[List[str]],\n pre_assignments_in_str: Dict[str, str],\n floorplan_strategy: str = 'HALF_SLR_LEVEL_FLOORPLANNING',\n threshold_for_iterative: int = 200,\n floorplan_opt_priority: str = 'AREA_PRIORITIZED',\n min_area_limit: float = 0.65,\n max_area_limit: float = 0.85,\n min_slr_width_limit: int = 10000,\n max_slr_width_limit: int = 15000,\n max_search_time: int = 600,\n hbm_port_v_name_list: List[str] = []\n) -> Tuple[Dict[Vertex, Slot], List[Slot]]:\n # get initial v2s\n init_slot = slot_manager.getInitialSlot()\n init_v2s = {v : init_slot for v in graph.getAllVertices()}\n\n actual_usage = get_actual_usage(init_v2s.keys(), slot_manager.getInitialSlot())\n if max_area_limit < actual_usage:\n max_area_limit = actual_usage + 0.1\n cli_logger.warning('The specified max_area_limit is less than the actual usage of the design: %f. '\n 'Adjust max_area_limit to %f', actual_usage, max_area_limit)\n if min_area_limit < actual_usage:\n min_area_limit = actual_usage\n cli_logger.warning('Adjust the min_area_limit to the actual usage of the design: %f', actual_usage)\n\n cli_logger.info('')\n cli_logger.info('Floorplan parameters:')\n cli_logger.info('')\n cli_logger.info(' floorplan_strategy: %s', floorplan_strategy)\n cli_logger.info(' threshold for switching to iterative partitioning: %d', threshold_for_iterative)\n cli_logger.info(' floorplan_opt_priority: %s', floorplan_opt_priority)\n cli_logger.info(' min_area_limit: %f', min_area_limit)\n cli_logger.info(' max_area_limit: %f', max_area_limit)\n cli_logger.info(' min_slr_width_limit: %d', min_slr_width_limit)\n cli_logger.info(' max_slr_width_limit: %d', max_slr_width_limit)\n cli_logger.info(' max_search_time per solving: %d', max_search_time)\n cli_logger.info('')\n cli_logger.info('Start floorplanning, please check the log for the progress...\\n')\n\n # get grouping constraints of Vertex\n grouping_constraints: List[List[Vertex]] = [\n [graph.getVertex(v_name) for v_name in v_name_group]\n for v_name_group in grouping_constraints_in_str\n ]\n\n _logger.info(f'The following modules are grouped to the same location:')\n for grouping in grouping_constraints_in_str:\n _logger.info(' ' + ', '.join(grouping))\n\n # get pre_assignment in Vertex\n pre_assignments = { graph.getVertex(v_name) : slot_manager.createSlot(pblock)\n for v_name, pblock in pre_assignments_in_str.items()\n }\n\n # get the hbm port vertices\n hbm_port_v_list = [graph.getVertex(v_name) for v_name in hbm_port_v_name_list]\n for v_name in hbm_port_v_name_list:\n _logger.info('Binding of HBM vertex %s is subject to change', v_name)\n\n print_pre_assignment(pre_assignments)\n\n print_vertex_areas(init_v2s.keys(), slot_manager.getInitialSlot())\n\n params = {\n 'floorplan_opt_priority': floorplan_opt_priority,\n 'min_area_limit': min_area_limit,\n 
'max_area_limit': max_area_limit,\n 'min_slr_width_limit': min_slr_width_limit,\n 'max_slr_width_limit': max_slr_width_limit,\n 'max_search_time': max_search_time,\n 'hbm_port_v_list': hbm_port_v_list,\n }\n\n # choose floorplan method\n num_vertices = len(graph.getAllVertices())\n v2s: Dict[Vertex, Slot] = {}\n\n # if user specifies floorplan methods\n if floorplan_strategy == 'SLR_LEVEL_FLOORPLANNING':\n _logger.info(f'user specifies to floorplan into SLR-level slots')\n v2s = partition(\n init_v2s, slot_manager, grouping_constraints, pre_assignments, partition_method='FOUR_WAY_PARTITION', **params\n )\n\n if v2s:\n return v2s, get_four_way_partition_slots(slot_manager)\n else:\n return None, None\n\n elif floorplan_strategy == 'QUICK_FLOORPLANNING':\n _logger.info(f'user specifies to prioritize speed')\n v2s = iterative_bipartition(init_v2s, slot_manager, grouping_constraints, pre_assignments)\n if v2s:\n return v2s, get_eight_way_partition_slots(slot_manager)\n else:\n return None, None\n\n else:\n if floorplan_strategy != 'HALF_SLR_LEVEL_FLOORPLANNING':\n raise NotImplementedError('unrecognized floorplan strategy %s', floorplan_strategy)\n\n # empirically select the floorplan method\n if num_vertices < threshold_for_iterative:\n _logger.info(f'There are {num_vertices} vertices in the design, use eight way partition')\n\n if num_vertices > 100:\n _logger.warning('Over 100 vertices. May have a long solving time. Reduce threshold_for_iterative to skip to iterative bi-partitioning.')\n\n v2s = partition(\n init_v2s, slot_manager, grouping_constraints, pre_assignments, partition_method='EIGHT_WAY_PARTITION', **params\n )\n if v2s:\n return v2s, get_eight_way_partition_slots(slot_manager)\n else:\n _logger.warning(f'Please check if any function in the design is too large')\n\n _logger.info(f'Use four-way partition because eight-way partition failed or there are too many vertices ({num_vertices})')\n v2s = partition(\n init_v2s, slot_manager, grouping_constraints, pre_assignments, partition_method='FOUR_WAY_PARTITION', **params\n )\n if v2s:\n final_v2s = iterative_bipartition(v2s, slot_manager, grouping_constraints, pre_assignments, partition_order=[Dir.vertical])\n\n if final_v2s:\n return final_v2s, get_eight_way_partition_slots(slot_manager)\n else:\n return v2s, get_four_way_partition_slots(slot_manager)\n\n _logger.error(f'AutoBridge fails to partition the design at the SLR level. 
Either the design is too large, or the functions/modules are too large.')\n return None, None", "def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##", "def search(self, pid, start, layers):\n plan = []\n workload = [0 for _ in range(len(self.workers))]\n\n # each layer is a separate search for the worker to process the layer\n for i in range(len(layers)):\n layer = layers[i]\n target_color = layer[\"color\"]\n target_thickness = layer[\"thickness\"]\n processing_costs = {k: math.ceil(target_thickness / self.processing_rate[k][target_color]) for k in 
self.processing_rate}\n\n # Searches to find the cost of processing every node at each worker.\n # Cost consists of: Cost of the path \n # + Existing workload cost \n # + processing cost by the worker\n # \n # Basically Dijkstra's.\n visited = set()\n path = {}\n path_costs = {}\n pq = [(0, start)]\n curr_costs = {}\n\n # Assumes single connected component \n while len(visited) != len(self.workers):\n cost, curr = heapq.heappop(pq)\n if curr in visited: continue\n visited.add(curr)\n curr_costs[curr] = cost + processing_costs[self.worker_flavor[curr]] + self.workload[curr]\n if curr == self.origin:\n curr_costs[curr] += self.origin_penalty\n for neighbor in self.neighbors[curr]:\n if neighbor in visited: continue\n cost_new = cost + 1 \n if neighbor == self.origin:\n cost_new += self.origin_penalty\n if neighbor not in path_costs or cost_new < path_costs[neighbor]:\n path_costs[neighbor] = cost_new\n path[neighbor] = curr\n heapq.heappush(pq, (cost_new, neighbor))\n\n # Get the best cost and candidate for processing the current layer\n best_cost = float(\"inf\")\n best_cand = -1\n for cand in curr_costs:\n if curr_costs[cand] < best_cost:\n best_cost = curr_costs[cand]\n best_cand = cand\n\n # If the best candidate isn't the starting node, add the cost of the\n # path for future workload considerations\n if best_cand != start:\n # create the path \n best_path = [best_cand]\n while best_path[-1] != start:\n best_path.append(path[best_path[-1]])\n best_path = best_path[::-1]\n\n # Add the Pass operations to the plan\n prev = start \n for curr in best_path[1:]:\n workload[prev] += 1\n plan.append([1, {\"Pass\":{\"pearl_id\":pid,\"to_worker\":curr}}])\n prev = curr\n\n # Add the noms to the plan \n workload[best_cand] += processing_costs[self.worker_flavor[best_cand]]\n plan.append([processing_costs[self.worker_flavor[best_cand]], {\"Nom\": pid}])\n\n # Set the last worker in the path as the start of the next search pass\n start = best_cand\n return plan, workload, start", "def _extract_solution(self, manager: RoutingIndexManager, routing: RoutingModel, assignment: Assignment, indices_to_visit: List[int]) -> Dict[str, Any]:\n sln = {\"objective\": assignment.ObjectiveValue()}\n \n stop_indices = []\n index = routing.Start(0)\n while not routing.IsEnd(index):\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n sln[\"order\"] = stop_indices\n return sln", "def stage_mesh_axis(self):\n stage_mesh_axis = None\n p = self.params\n if p.mesh_axis_names is not None:\n stage_mesh_axis = base_layer.to_partition_spec(\n p.weight_split_dims_mapping.stages, p.mesh_axis_names\n )[0]\n return stage_mesh_axis", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the 
path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def getOptimalSolution(self):\n max_index = np.argmax(self.Ws)\n self.Wmax = self.Ws[max_index]\n self.Emax = self.subsets[max_index]\n return (self.Wmax, self.Emax)", "def managable_mesh_list(mesh_lst, struct_grd=False):\n #step1\n grd_fact = (1+int(struct_grd))\n slice_idx = 0\n for idx, mm in enumerate(mesh_lst):\n num_cells = mm.num_cells()\n if(int(num_cells/grd_fact) <600):\n print(\"removing the mesh at index %i due to low cell count (%i) for peridynamic calculations\"%(idx, int(num_cells/grd_fact)))\n slice_idx = idx\n\n mesh_lst = mesh_lst[slice_idx+1:]\n \n #Step2\n if(len(mesh_lst)> 5):\n print(\"Too many meshes in the list, resizing to managable size\")\n return mesh_lst[0:3]", "def solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic):\n\n result = {}\n result[\"visualStages\"] = []\n for stage in stages:\n\n stage_dic = {}\n object_dic_copy = copy.deepcopy(objects_dic)\n predicates = stage[\"items\"]\n sorted_predicates = priority(predicates, predicates_rules)\n\n # For hanoi problem, reset each stage\n # For logistics problem, reset each stage\n for fname in gstate[\"reset_function\"]:\n gstate[fname] = {}\n solvepredicates(sorted_predicates, object_dic_copy, predicates_rules, gstate)\n stage_dic[\"visualSprites\"] = object_dic_copy\n if \"stageName\" not in stage:\n stage_dic[\"stageName\"] = \"Inital Stage\"\n stage_dic[\"stageInfo\"] = \"No step information\"\n\n else:\n stage_dic[\"stageName\"] = stage[\"stageName\"]\n stage_dic[\"stageInfo\"] = stage[\"stageInfo\"]\n\n result[\"visualStages\"].append(stage_dic)\n\n result[\"subgoals\"] = Subgoal.get_subgoal(stages, problem_dic[1]['goal'].copy(), actionlist.copy())\n\n return result", "def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), 
\n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"", "def partition_mesh(mesh, n_parts, use_metis=True, verbose=False):\n output('partitioning mesh into %d subdomains...' % n_parts, verbose=verbose)\n timer = Timer(start=True)\n\n if use_metis:\n try:\n from pymetis import part_graph\n\n except ImportError:\n output('pymetis is not available, using naive partitioning!')\n part_graph = None\n\n if use_metis and (part_graph is not None):\n cmesh = mesh.cmesh\n cmesh.setup_connectivity(cmesh.dim, cmesh.dim)\n graph = cmesh.get_conn(cmesh.dim, cmesh.dim)\n\n cuts, cell_tasks = part_graph(n_parts, xadj=graph.offsets.astype(int),\n adjncy=graph.indices.astype(int))\n cell_tasks = nm.array(cell_tasks, dtype=nm.int32)\n\n else:\n ii = nm.arange(n_parts)\n n_cell_parts = mesh.n_el // n_parts + ((mesh.n_el % n_parts) > ii)\n output('cell counts:', n_cell_parts, verbose=verbose)\n assert_(sum(n_cell_parts) == mesh.n_el)\n assert_(nm.all(n_cell_parts > 0))\n\n offs = nm.cumsum(nm.r_[0, n_cell_parts])\n cell_tasks = nm.digitize(nm.arange(offs[-1]), offs) - 1\n\n output('...done in', timer.stop(), verbose=verbose)\n\n return cell_tasks", "def build_data_parallel_strategies(\n train_step_graph: GraphModule,\n num_params: int,\n num_states: int,\n mesh: DeviceMesh,\n batch_dim: int = 0,\n) -> Dict[fx.Node, StrategyType]:\n activation_idx = num_params + num_states\n non_compute_ops = [\n aten.clone.default,\n aten.detach.default,\n aten.ones_like.default,\n aten.reshape.default,\n aten.t.default,\n aten.view.default,\n torch.ops._spmd.tag_grad.default,\n operator.getitem,\n ]\n\n tuple_strategy_ops = [aten._fused_adam.default]\n\n dp_strategy_map: Dict[fx.Node, StrategyType] = {}\n batch_dim_analyzer = BatchDimAnalyzer(batch_dim)\n placeholder_idx = 0\n num_param_grad = 0\n\n # first we backward propagate to mark the param gradients sharding\n # with tag_grad node helps and then delete the tag_grad nodes\n for node in reversed(list(train_step_graph.graph.nodes)):\n # find a param_grad node via the tagging\n if node.target == torch.ops._spmd.tag_grad.default:\n cur_node = node\n while cur_node.target in non_compute_ops:\n cur_node = cur_node.args[0]\n partial_strategy = _gen_partial_strategy(mesh)\n dp_strategy_map[cur_node] = DataParallelStrategy(\n NodeType.GRAD, [partial_strategy]\n )\n num_param_grad += 1\n # remove the tag_grad node from graph\n node.replace_all_uses_with(node.args[0])\n train_step_graph.graph.erase_node(node)\n\n if num_param_grad == 
num_params:\n # early break if we have already processed all param_grads\n break\n\n # next we forward propagate to mark all the sharding\n for node in train_step_graph.graph.nodes:\n if node.op == \"placeholder\":\n if \"val\" not in node.meta:\n # NOTE: There're certain cases where the placeholder nodes do\n # not have real tensor values:\n # 1. optimizer states can be None sometimes, i.e. SGD with\n # no momentum, optimizer states populate `momentum` state\n # as None, the full graph we get from `compile` would have\n # None as the placeholder value\n # 2. function args might not only contain params or activations,\n # but also contain other non-tensor inputs, i.e. the model\n # and optimizer instances baked in as a placeholder, there might\n # also be some scalar argument which is not a tensor\n #\n # For the above cases, we create a NON_TENSOR stratgy so that we\n # know it's not a tensor and we don't need to shard it\n dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, [])\n\n elif placeholder_idx < num_params:\n # during compilation there's an assumption that the first num_params\n # placeholders should be parameters\n shard_strategy = _gen_shard_strategy(mesh, 0)\n replica_strategy = _gen_replicate_strategy(mesh)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.PARAM, [replica_strategy, shard_strategy]\n )\n\n elif placeholder_idx < activation_idx:\n # optimizer states follow the same strategy as\n # the corresponding parameters\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, 0)\n\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.STATE, [replica_strategy, shard_strategy]\n )\n else:\n activation_batch_dim_size = node.meta[\"val\"].shape[batch_dim]\n # find the first activation node and use its batch dim size\n if batch_dim_analyzer.batch_dim_size == -1:\n batch_dim_analyzer.init_batch_dim_size(activation_batch_dim_size)\n\n batch_dim_analyzer.set_batch_dim(node, batch_dim)\n shard_strategy = _gen_shard_strategy(mesh, batch_dim)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [shard_strategy]\n )\n placeholder_idx += 1\n elif node.op == \"call_function\":\n # Annotate node types for the computation graph\n # Data Parallel node propagation logic:\n # param (non-compute) -> out: param\n # grad (non-compute before/after) -> out: grad\n # state -> output: state\n #\n # param + activation (param must be replicate, act be sharded) -> out: activation\n # param/state + grad (param/state/grad be the same spec) -> out: param/state\n # param + state -> out: param\n\n if node.target in non_compute_ops:\n # At this point, we should have removed all the `tag_grad` nodes in the graph\n assert node.target != torch.ops._spmd.tag_grad.default\n\n input_nodes = node.all_input_nodes\n assert (\n len(input_nodes) == 1\n ), f\"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}\"\n arg_strategy = dp_strategy_map[input_nodes[0]]\n\n if node.target == operator.getitem:\n # for getitem call, just forward the strategy from the input\n getitem_idx = node.args[1]\n if isinstance(arg_strategy, TupleStrategy):\n # for tuple strategy, we need to get the child strategy from the tuple\n dp_strategy_map[node] = arg_strategy.childs[getitem_idx]\n else:\n # if it's not a tuple strategy, we just forward the arg strategy\n dp_strategy_map[node] = arg_strategy\n else:\n assert isinstance(arg_strategy, DataParallelStrategy)\n arg_node_type = arg_strategy.node_type\n if 
arg_node_type == NodeType.PARAM:\n replica_strategy = _gen_replicate_strategy(mesh)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.PARAM, [replica_strategy]\n )\n elif arg_node_type == NodeType.GRAD:\n partial_sig = _gen_partial_strategy(mesh)\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.GRAD, [partial_sig]\n )\n elif arg_node_type == NodeType.ACT:\n arg_node_spec = batch_dim_analyzer.compute_act_spec(\n input_nodes[0], mesh\n )\n\n output_spec = batch_dim_analyzer.compute_act_spec(node, mesh)\n\n shard_strategy = PlacementStrategy(\n output_spec=output_spec, input_specs=[arg_node_spec]\n )\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [shard_strategy]\n )\n else:\n raise RuntimeError(\n f\"non compute op not supporting {arg_node_type}! \"\n )\n\n # finished processing this non-compute node\n continue\n\n # for computatation nodes, we need to check all the inputs\n input_args = node.all_input_nodes\n input_specs = []\n if node in dp_strategy_map:\n # found a param_grad node that already have output pre-filled spec\n # fill in the expected input specs for the pre-filled strategy\n node_strategy = dp_strategy_map[node]\n assert isinstance(node_strategy, DataParallelStrategy)\n node_type = node_strategy.node_type\n assert node_type == NodeType.GRAD\n produce_param_grad_strat = node_strategy.strategies\n has_activation = False\n for arg in input_args:\n arg_strategy = dp_strategy_map[arg]\n assert isinstance(arg_strategy, DataParallelStrategy)\n arg_node_type = arg_strategy.node_type\n if arg_node_type == NodeType.ACT:\n # activation sharded\n has_activation = True\n act_spec = batch_dim_analyzer.compute_act_spec(arg, mesh)\n\n input_specs.append(act_spec)\n\n if has_activation:\n assert len(produce_param_grad_strat) == 1\n produce_param_grad_strat[0].input_specs = input_specs\n elif node.target in tuple_strategy_ops:\n # ops that need to build tuple strategy instead of normal strategy\n # This should happen rarely and only needed when we need to generate\n # different node strategy for multiple outputs (i.e. 
fused_adam op)\n # TODO: Currently this specializes to fused optimizer ops, but we need\n # to see how to generalize this strategy building logic\n output_strategy_len = len(node.args) - 1\n tuple_strategies = []\n for i in range(output_strategy_len):\n if not isinstance(node.args[i], list):\n raise RuntimeError(\n f\"Expecting list as arg to build Tuple Strategy, but found type {type(node.args[i])}!\"\n )\n # for list/tuple arg, use the first one to find out the node type\n if len(node.args[i]) > 0:\n arg_strategy = dp_strategy_map[node.args[i][0]]\n assert isinstance(arg_strategy, DataParallelStrategy)\n assert arg_strategy.node_type in [\n NodeType.PARAM,\n NodeType.GRAD,\n NodeType.STATE,\n ], \"Expecting param/grad/state as arg to build Tuple Strategy!\"\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)\n out_node_strategy: StrategyType = DataParallelStrategy(\n arg_strategy.node_type, [replica_strategy, shard_strategy]\n )\n\n tuple_strategies.append(out_node_strategy)\n\n output_tuple_strategy = TupleStrategy(tuple(tuple_strategies))\n dp_strategy_map[node] = output_tuple_strategy\n else:\n # NOTE: This is the common region for all regular computation ops\n\n input_node_types = [\n cast(DataParallelStrategy, dp_strategy_map[arg]).node_type\n for arg in input_args\n if isinstance(dp_strategy_map[arg], DataParallelStrategy)\n ]\n if NodeType.GRAD in input_node_types:\n # param/state + grad, build up acceptable strategy\n # the strategy should be the same for all the inputs/outputs\n # TODO: optimizer parts should follow the dtensor prop logic\n # to support more general cases that allows optimizer states\n # to have different shardings compare to the params\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)\n output_node_type = NodeType.PARAM\n\n non_grad_types = [t for t in input_node_types if t != NodeType.GRAD]\n\n output_node_type = non_grad_types[0]\n for non_grad_type in non_grad_types:\n assert (\n non_grad_type == output_node_type\n ), f\"Found more than one non grad types! 
Expect {output_node_type} but found {non_grad_type}!\"\n assert output_node_type in [\n NodeType.PARAM,\n NodeType.STATE,\n ], f\"Expecting output node type to be either state or param, but found {output_node_type}!\"\n\n dp_strategy_map[node] = DataParallelStrategy(\n output_node_type, [replica_strategy, shard_strategy]\n )\n elif NodeType.STATE in input_node_types:\n # either param + state or state + state\n replica_strategy = _gen_replicate_strategy(mesh)\n shard_strategy = _gen_shard_strategy(mesh, shard_dim=0)\n output_node_type = (\n NodeType.PARAM\n if NodeType.PARAM in input_node_types\n else NodeType.STATE\n )\n\n dp_strategy_map[node] = DataParallelStrategy(\n output_node_type, [replica_strategy, shard_strategy]\n )\n elif NodeType.PARAM in input_node_types:\n if NodeType.ACT in input_node_types:\n # param + activation, build up acceptable strategy\n # param must be replicated, activation must be sharded\n for arg in input_args:\n arg_strategy = dp_strategy_map[arg]\n assert isinstance(arg_strategy, DataParallelStrategy)\n node_type = arg_strategy.node_type\n if node_type == NodeType.ACT:\n # compute activation spec\n act_spec = batch_dim_analyzer.compute_act_spec(\n arg, mesh\n )\n\n input_specs.append(act_spec)\n elif node_type == NodeType.PARAM:\n # param must be replicated\n input_specs.append(\n DTensorSpec(mesh=mesh, placements=[Replicate()])\n )\n else:\n raise RuntimeError(\n f\"Expecting node with parameter and activation, but found {input_node_types}! \"\n )\n # produce activation type sharding for output\n output_spec = batch_dim_analyzer.compute_act_spec(node, mesh)\n\n act_strategy = PlacementStrategy(\n output_spec=output_spec, input_specs=input_specs\n )\n\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [act_strategy]\n )\n else:\n # If inputs only have parameters, the\n # strategy of this node should follow input\n dp_strategy_map[node] = dp_strategy_map[input_args[0]]\n else:\n # If input nodes does not have PARAM/GRAD/STATE, then\n # it should be a pure activation computation, it should\n # produce activation output.\n # Activations are usually sharded unless model creates\n # new tensors during computation, which depend on whether\n # the new tensor associate with a batch dim or not, it could\n # be shard/replicate/partial, batch dim analyzer should tell\n # us the correct sharding.\n for arg in input_args:\n arg_strategy = dp_strategy_map[arg]\n assert isinstance(arg_strategy, DataParallelStrategy)\n input_spec = batch_dim_analyzer.compute_act_spec(arg, mesh)\n\n input_specs.append(input_spec)\n\n act_spec = batch_dim_analyzer.compute_act_spec(node, mesh)\n op_strategy = PlacementStrategy(\n output_spec=act_spec, input_specs=input_specs\n )\n dp_strategy_map[node] = DataParallelStrategy(\n NodeType.ACT, [op_strategy]\n )\n\n elif node.op == \"output\":\n dp_strategy_map[node] = DataParallelStrategy(NodeType.NON_TENSOR, [])\n else:\n raise RuntimeError(f\"op code {node.op} not supported\")\n\n return dp_strategy_map # type: ignore[return-value]", "def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))", "def find_max_score_location(grid, shape):", "def get_bc_parts(mesh, lst):\n if len(lst) > 0:\n shift = max(0, -min(e.value for e in lst))\n else:\n return [], [], 0, FacetFunction(\"size_t\", mesh, 0)\n # values must be shifted by smallest Steklov value since 
size_t is unsigned\n fun = FacetFunction(\"size_t\", mesh, shift)\n for bc in lst:\n sub = OnBoundary()\n # overwrite inside function with the one from bc\n sub.inside = bc.getTest()\n sub.mark(fun, bc.value + shift)\n # some conditions may cancel eachother\n exist = set(np.unique(fun.array()))\n lst = [e for e in lst if e.value+shift in exist]\n # separate Robin and Steklov, Dirichlet and Neumann are irrelevant\n Robin = [e for e in lst if e.value > 1 and e.parValue != 0]\n Steklov = [e for e in lst if e.value < 0 and e.parValue != 0]\n return Robin, Steklov, shift, fun", "def get_best_clique(self):\n\t\treturn [i+1 for i in range(self._size) if self._globalMinimumState[i] == 1]", "def best_move(data, indexing, cf, cf_prime, N=20, M=30): \n stats = {}\n timer = time()\n ns = list(neighbours(indexing, random_stream=N))\n stats[\"n_neighbours\"] = len(ns)\n stats[\"t_neighbours\"] = 1000*(time() - timer)\n\n dt_rcs = []\n bestpair, best_rcost = None, None\n for v,k in ns:\n timer = time()\n rc = reduced_cost(data, indexing, cf, cf_prime, v, k, uw_sample_count=M)\n dt_rcs.append(1000*(time() - timer))\n if bestpair is None or rc > best_rcost:\n bestpair = v,k\n best_rcost = rc\n\n stats[\"t_rcs_mean\"] = np.mean(dt_rcs)\n stats[\"t_rcs_std\"] = np.std(dt_rcs)\n stats[\"t_rcs_sum\"] = np.sum(dt_rcs)\n stats[\"rc\"] = best_rcost\n stats[\"partcount\"] = np.unique(indexing).shape[0]\n return bestpair, best_rcost, stats", "def Compute_Grid(Idx, Coeff, q_max, rules, growth, LevelMax, sc, p, tol ):\n\n seed = 123456789\n #Coeff= Sandia.calculate_coefficients(Idx, q_max)\n new_np = Sandia.max_next_points(Idx, Coeff, rules, growth)\n points = Sandia.weights_and_points(new_np, LevelMax, Idx, Coeff, growth, rules, sc, p)\n N_Unique, sparse_index = Sandia.unique_points(seed, tol, points)\n return Sandia.reduce_points_and_weights(N_Unique, points, Idx, sparse_index, Coeff, growth, rules, sc, p)", "def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = 10 # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # ##print(p, map_size, num_feat, tmp)\n return tmp", "def refinemesh(prev_legs, state0_chaser, n_s):\n scored_points = np.array([])\n for leg in prev_legs:\n scored_point = [*leg.dv, leg.t_leg, leg.score]\n scored_points = np.append(scored_points, scored_point)\n scored_points = scored_points.reshape(len(prev_legs), 5)\n tri = Delaunay(scored_points[:, 0:4], qhull_options='QJ')\n m_max = max(scored_points[:, 4]) # Maximum trajectory score of all simplices of the triangulation\n if m_max == 0:\n print('algorithm.py: m_max = 0 because all leg scores are 0!!!')\n m_max = 1 
# to avoid raising the dividing by 0 error if all leg scores are 0\n g_max = 1\n for q in tri.simplices:\n smplx_scores = scored_points[:, 4][q] # scores of the points defining the simplex\n aux = mean(smplx_scores)\n if g_max < aux:\n g_max = aux\n\n simplices_scored = []\n for q in tri.simplices:\n smplx_score = compute_simplexscore(q, scored_points, m_max, g_max)\n # simp_scored = [smplx_score, q_vec]\n simplices_scored.append([smplx_score, q])\n sorted_simp_scores = sorted(simplices_scored, reverse=True) # ranks the simplices based on score\n new_samples = samplewithinbestsimplices(sorted_simp_scores, tri.points, n_s)\n\n new_legs = []\n for s in new_samples:\n leg = Leg(s[0:3], s[3], state0_chaser)\n new_legs.append(leg)\n\n return new_legs", "def partition(data, s, b, u, res, points, size, depth):\r\n\t# depth is just for demonstration purposes, terminating the recursion early\r\n\t\r\n\t# termination conditions\r\n\tif size > 1 and depth > 0:\r\n\r\n\t\t# variables that keep track of the scope of \"points\" for iteration purposes\r\n\t\trlen = []\r\n\t\tclen = len(points)\r\n\t\tfor i in range(clen):\r\n\t\t\trlen.append(len(points[i]))\r\n\t\t\r\n\t\t# keeps track of which point defines the maximal set\r\n\t\tmax = -10000\r\n\t\tmax_index = [0,0]\r\n\r\n\t\t# each point on the grid defines a potentially maximal set (including that point and the best \r\n\t\t# choice for higher rows) s[x][y] tracks the value of the set defined by (x, y)\r\n\t\tfor i in range(len(points)):\r\n\t\t\t# calculating s based on current row\r\n\t\t\ts[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]] = data[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]]\r\n\t\t\tfor j in range(rlen[i] - 2, -1, -1):\r\n\t\t\t\ts[points[i][j][0]][points[i][j][1]] = s[points[i][j + 1][0]][points[i][j + 1][1]] + data[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\r\n\t\t\t# if below the first row, factoring in the optimal set from above rows\r\n\t\t\tif i != 0:\r\n\t\t\t\tprev_end = points[i-1][rlen[i-1]-1]\r\n\t\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\t\tu[points[i][j][0]][points[i][j][1]] = b[prev_end[0]][np.minimum(prev_end[1], points[i][j][1])]\r\n\t\t\t\t\ts[points[i][j][0]][points[i][j][1]] += s[prev_end[0]][u[points[i][j][0]][points[i][j][1]]]\r\n\t\t\t\r\n\t\t\t# keeping track of the best sets from the new row for later use (what b and u are for)\r\n\t\t\trow_max = -10000\r\n\t\t\trow_max_index = -1\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tcurr = s[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\tif curr > row_max:\r\n\t\t\t\t\trow_max = curr\r\n\t\t\t\t\trow_max_index = points[i][j][1]\r\n\t\t\t\tb[points[i][j][0]][points[i][j][1]] = row_max_index\r\n\r\n\t\t\t# updating the global optimal set\r\n\t\t\tif row_max > max:\r\n\t\t\t\tmax = row_max\r\n\t\t\t\tmax_index[0] = i\r\n\t\t\t\tmax_index[1] = row_max_index\r\n\t\t\r\n\t\t# finding the set of points that generated the global optimum\r\n\t\tpointers = []\r\n\t\tpointers.append(max_index[1])\r\n\t\tfor i in range(max_index[0], 0, -1):\r\n\t\t\tpointers.append(u[points[i][0][0]][pointers[max_index[0]-i]])\r\n\t\tpointers = np.flip(pointers, axis=0)\r\n\t\t\r\n\t\t# finding the set of points of the upper and lower partitions defined by the optimal set\r\n\t\tupper_points = []\r\n\t\tlower_points = []\r\n\t\tup_num = 0\r\n\t\tlow_num = 0\r\n\t\tfor i in range(clen):\r\n\t\t\turow = []\r\n\t\t\tlrow = []\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tif i <= max_index[0] and points[i][j][1] >= pointers[i]:\r\n\t\t\t\t\turow.append(points[i][j])\r\n\t\t\t\t\tup_num += 
1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlrow.append(points[i][j])\r\n\t\t\t\t\tlow_num += 1\r\n\t\t\tif len(urow) > 0:\r\n\t\t\t\tupper_points.append(tuple(urow))\r\n\t\t\tif len(lrow) > 0:\r\n\t\t\t\tlower_points.append(tuple(lrow))\r\n\r\n\t\t# updating the final result and prepping the new datasets to have mean 0\r\n\t\tfor i in range(len(upper_points)):\r\n\t\t\tfor j in range(len(upper_points[i])):\r\n\t\t\t\tres[upper_points[i][j][0]][upper_points[i][j][1]] += max/up_num\r\n\t\t\t\tdata[upper_points[i][j][0]][upper_points[i][j][1]] -= max/up_num\r\n\t\tfor i in range(len(lower_points)):\r\n\t\t\tfor j in range(len(lower_points[i])):\r\n\t\t\t\tres[lower_points[i][j][0]][lower_points[i][j][1]] -= max/low_num\r\n\t\t\t\tdata[lower_points[i][j][0]][lower_points[i][j][1]] += max/low_num\r\n\t\t\r\n\t\t# recursion (if the optimal set is the current one, stop since at this point \r\n\t\t# the mean of the selected elements is optimal over them)\r\n\t\tif up_num != size:\r\n\t\t\tpartition(data, s, b, u, res, upper_points, up_num, depth-1)\r\n\t\tif low_num != size:\r\n\t\t\tpartition(data, s, b, u, res, lower_points, low_num, depth-1)\r\n\telse:\r\n\t\treturn", "def create_partition(mesh,polygons,enforce_exact=False):", "def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters", "def eo_edges(self):\n logger.info(\"eo_edges called\")\n permutations = []\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # Build a list of the wing strings at each midge\n wing_strs = []\n\n for _, square_index, partner_index in midges_recolor_tuples_555:\n square_value = self.state[square_index]\n partner_value = self.state[partner_index]\n wing_str = square_value + partner_value\n wing_str = wing_str_map[square_value + partner_value]\n wing_strs.append(wing_str)\n\n # build a list of all possible EO permutations...an even number of edges must be high\n for num in range(4096):\n num = str(bin(num)).lstrip(\"0b\").zfill(12)\n if num.count(\"1\") % 2 == 0:\n permutations.append(list(map(int, num)))\n\n # Put all 2048 starting states in a file and point ida-via-graph\n # at the file so it can solve all of them and apply the one that is the shortest.\n lr_center_stage_states = []\n eo_outer_orbit_states = []\n eo_inner_orbit_states = []\n\n for permutation in permutations:\n must_be_uppercase = []\n must_be_lowercase = []\n self.state = original_state[:]\n\n for wing_str, uppercase in zip(wing_strs, permutation):\n if uppercase:\n must_be_uppercase.append(wing_str)\n else:\n must_be_lowercase.append(wing_str)\n\n # 
logger.info(\"%s: %s permutation %s\" % (self, index, \"\".join(map(str, permutation))))\n self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)\n\n # build lists of the states that we need to find state_indexes for\n lr_center_stage_states.append(self.lt_phase3_lr_center_stage.state())\n eo_outer_orbit_states.append(self.lt_phase3_eo_outer_orbit.state())\n eo_inner_orbit_states.append(self.lt_phase3_eo_inner_orbit.state())\n\n # now we have a huge list of states to lookup, do a binary search on multiple states at once (this is drastically faster\n # than binary searching for them individually). state_index_multiple() will return a dict where the state is the key\n # and the state_index is the value.\n lr_center_stage_eo_inner_orbit_state_indexes = self.lt_phase3_lr_center_stage.state_index_multiple(\n lr_center_stage_states\n )\n eo_outer_orbit_state_indexes = self.lt_phase3_eo_outer_orbit.state_index_multiple(eo_outer_orbit_states)\n eo_inner_orbit_state_indexes = self.lt_phase3_eo_inner_orbit.state_index_multiple(eo_inner_orbit_states)\n\n # build a list of tuples of the state indexes\n pt_state_indexes = []\n for lr_center_stage_eo_inner_orbit_state, eo_outer_orbit_state, eo_inner_orbit_state in zip(\n lr_center_stage_states, eo_outer_orbit_states, eo_inner_orbit_states\n ):\n pt_state_indexes.append(\n (\n lr_center_stage_eo_inner_orbit_state_indexes[lr_center_stage_eo_inner_orbit_state],\n eo_outer_orbit_state_indexes[eo_outer_orbit_state],\n eo_inner_orbit_state_indexes[eo_inner_orbit_state],\n )\n )\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # When solve_via_c is passed pt_state_indexes (2048 lines of states in this case), it will try all 2048 of them\n # to find the state that has the shortest solution.\n self.lt_phase3.solve_via_c(pt_states=pt_state_indexes)\n\n self.print_cube_add_comment(\"edges EOed into high/low groups\", tmp_solution_len)\n self.post_eo_state = self.state[:]\n self.post_eo_solution = self.solution[:]\n\n # re-color the cube so that the edges are oriented correctly so we can\n # pair 4-edges then 8-edges. 
After all edge pairing is done we will uncolor\n # the cube and re-apply the solution.\n self.edges_flip_orientation(wing_strs, [])\n self.highlow_edges_print()", "def gather_surface_coords_for_parts(surface_coords_cropped,\n highest_scoring_part):\n max_detections, height, width, num_parts, _ = (\n shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))\n flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])\n flattened_part_ids = tf.reshape(highest_scoring_part, [-1])\n\n # Produce lookup indices that represent the locations of the highest scoring\n # parts in the `flattened_surface_coords` tensor.\n flattened_lookup_indices = (\n num_parts * tf.range(max_detections * height * width) +\n flattened_part_ids)\n\n vu_coords_flattened = tf.gather(flattened_surface_coords,\n flattened_lookup_indices, axis=0)\n return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])", "def calculate_best_mesh_abstraction_path(self, record_id: str) -> Tuple:\n base_indexes = {}\n # get all abstractions paths from recordID\n result = self.graphdb.get_all_index_listings_of_a_mesh_record(record_id)\n for binding in result[\"results\"][\"bindings\"]:\n base_indexes[binding[\"index\"][\"value\"]] = []\n\n for base_index in base_indexes:\n root_index_found = False\n current_parent_index = base_index\n while not root_index_found:\n dot_idx = current_parent_index.rfind(\".\")\n if dot_idx == -1:\n current_parent_index = current_parent_index[0:1]\n else:\n current_parent_index = current_parent_index[:dot_idx]\n base_indexes[base_index].append(current_parent_index)\n if \".\" not in current_parent_index:\n root_index_found = True\n\n parent_record_paths = {}\n for base_index in base_indexes:\n parent_record_paths[base_index] = []\n for parent_index in base_indexes[base_index]:\n if len(parent_index) == 1:\n # get group name instead:\n parent_record_paths[base_index].append(parent_index)\n continue\n else:\n result = self.graphdb.get_record_id_from_mesh_listing_index(parent_index)\n\n for binding in result[\"results\"][\"bindings\"]:\n parent_record_paths[base_index].append(self.graphdb.remove_uri(binding[\"record\"][\"value\"]))\n\n walks = {}\n for parent_path in parent_record_paths:\n walks[parent_path] = self.graphdb.generate_path_comparison_walk_mesh_record_id_list(\n parent_record_paths[parent_path])\n\n threshold_reached, all_similarities, cor_walk = self.calculate_best_fitting_word_group(None, walks)\n walk_key_list = list(walks.keys())\n walk_val_list = list(walks.values())\n if cor_walk in walk_val_list:\n best_abstraction_path = walk_key_list[walk_val_list.index(cor_walk)]\n\n return best_abstraction_path, all_similarities\n\n else:\n return None, None", "def solution_path(self) -> list[State]:", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = 
[True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def indices(self):\n return self._kbounded_partitions", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n 
heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def build_graph(mesh, evals, nevals,nfix, step=1.0, params=OptimizationParams()): #smoothing='absolute', numsteps=40000):\n [Xori,TRIV,n, m, Ik, Ih, Ik_k, Ih_k, Tpi, Txi, Tni, iM, Windices, Ael, Bary] = mesh\n\n dtype='float32'\n if(Xori.dtype=='float64'):\n dtype='float64'\n if(Xori.dtype=='float16'):\n dtype='float16'\n print(dtype)\n graph = lambda: None\n \n #model the shape deformation as a displacement vector field\n dX = tf.Variable((0*Xori).astype(dtype) );\n scaleX = tf.Variable(1,dtype=dtype); #not used in shape alignment\n \n graph.input_X = tf.placeholder(shape=dX.shape,dtype=dtype);\n graph.assign_X = tf.assign(dX, graph.input_X-Xori*scaleX).op;\n \n graph.X=Xori*scaleX+dX;\n \n Lx,S,L,Ak = tf_calc_lap(mesh,graph.X)\n\n #Normalized Laplacian\n Si = tf.diag(tf.sqrt(1/S[:,0]))\n Lap = tf.matmul(Si,tf.matmul(Lx,Si));\n\n \n #Spectral decomposition approach\n [s_,v] = tf.self_adjoint_eig( Lap )\n graph.cost_evals_f1 = 1e2*tf.nn.l2_loss( (s_[0:nevals]-evals[0:nevals])* (1/np.asarray(range(1,nevals+1),dtype)) )/nevals # \\\n \n \n #Approach avoiding spectral decomposition - NOT USED\n # [_,EigsOpt,lap] = tfeig(Lap)\n # v = tf.Variable(EigsOpt[:,0:nevals].astype(dtype) );\n # cost_evals_a = 1e3*tf.nn.l2_loss(tf.matmul(tf.transpose(v),v)-tf.eye(nevals,dtype=dtype));\n # cost_evals_b = 1e1*tf.nn.l2_loss( (tf.matmul(Lap,v) - tf.matmul(v,np.diag(evals[0:nevals]).astype(dtype))) )/nevals\n # graph.cost_evals_f2 = cost_evals_a + cost_evals_b\n \n \n meanA, varA = tf.nn.moments(Ak, axes=[0])\n meanL, varL = tf.nn.moments(L, axes=[0])\n\n graph.global_step = tf.Variable(step+1.0, name='global_step',trainable=False, dtype=dtype)\n graph.global_step_val = tf.placeholder(dtype)\n graph.set_global_step = tf.assign(graph.global_step, graph.global_step_val).op \n \n #regularizers decay factor\n cosine_decay = 0.5 * (1 + tf.cos(3.14 * tf.minimum(np.asarray(params.numsteps/2.0,dtype=dtype),graph.global_step) / (params.numsteps/2.0)))\n graph.decay= (1 - params.decay_target) * cosine_decay + params.decay_target\n \n if(params.smoothing=='displacement'): \n graph.vcL = params.curvature_reg*graph.decay * tf.nn.l2_loss( tf.matmul(Bary.astype(dtype),dX)[nfix:,:]);\n graph.vcW = params.smoothness_reg*graph.decay *tf.nn.l2_loss( tf.matmul(Lx,dX)[nfix:,:]) \n if(params.smoothing=='absolute'):\n graph.vcL = params.curvature_reg*graph.decay * tf.nn.l2_loss( tf.matmul(Bary.astype(dtype),S*graph.X)[nfix:,:]);\n graph.vcW = params.smoothness_reg**graph.decay *tf.nn.l2_loss( tf.matmul(Lx,graph.X)[nfix:,:]) \n \n #Volume compuation\n T1 = tf.gather(graph.X, TRIV[:,0])\n 
T2 = tf.gather(graph.X, TRIV[:,1])\n T3 = tf.gather(graph.X, TRIV[:,2])\n XP = tf.cross(T2-T1, T3-T2)\n T_C = (T1+T2+T3)/3\n graph.Volume = params.volume_reg*graph.decay*tf.reduce_sum(XP*T_C/2)/3\n\n\n #L2 regularizer on total displacement weighted by area elements\n graph.l2_reg = params.l2_reg*tf.nn.l2_loss(S*dX)\n\n \n graph.cost_spectral = graph.cost_evals_f1 + graph.vcW + graph.vcL - graph.Volume + graph.l2_reg\n\n optimizer = tf.train.AdamOptimizer(params.opt_step)\n \n #gradient clipping \n gvs = optimizer.compute_gradients(graph.cost_spectral)\n capped_gvs = [(tf.clip_by_value(grad, -0.0001, 0.0001), var) for grad, var in gvs if grad!=None]\n graph.train_op_spectral = optimizer.apply_gradients(capped_gvs, global_step=graph.global_step)\n\n [graph.s_,v] = tf.self_adjoint_eig( Lap ) \n return graph", "def optimal_solution_multiple_pickup(memo):\n # Calculates what the maximum value is and saves which row and [col][energy] index\n maxvalue = None\n for i in range(len(memo)+1):\n # Sets up initial value\n if maxvalue is None:\n # Sets initial value to first non empty cell[1]\n if memo[len(memo)-1][i][1] is not None:\n maxvalue = (memo[len(memo)-1][i][1], i, 1)\n # Compares first non empty cell[1] with first non empty cell[0]\n if memo[len(memo)-1][i][0] is not None:\n if maxvalue is not None:\n if memo[len(memo) - 1][i][0] > maxvalue[0]:\n maxvalue = (memo[len(memo) - 1][i][0], i, 0)\n # In case first non empty cell[1] was None\n else:\n maxvalue = (memo[len(memo) - 1][i][0], i, 0)\n # After initial value is set. Compares it the other value in that cell to get maximum\n else:\n if memo[len(memo)-1][i][1] is not None:\n if memo[len(memo)-1][i][1] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i][1], i, 1)\n if memo[len(memo)-1][i][0] is not None:\n if memo[len(memo)-1][i][0] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i][0], i, 0)\n\n # Goes back and calculates how the optimal solution was formed\n optimal_solution = [0] * len(memo)\n current_row = maxvalue[1]\n current_index = maxvalue[2]\n # Goes backwards through the array starting at the best value\n for col in range(len(memo)-1, 0, -1):\n # For energy > 0 where it has the choice to pick up or not\n if current_row > 0:\n # Checks if it did pick up. 
If current cell does not have the same value as the previous column with\n # 1 less energy[current_index] then it must have picked up\n if memo[col][current_row][current_index] != memo[col-1][current_row-1][1]:\n optimal_solution[col] = 1\n\n # Picks the maximum number from previous column and 1 more energy\n if memo[col-1][current_row+1][0] is None:\n current_index = 1\n elif memo[col-1][current_row+1][1] is None:\n current_index = 0\n else:\n if memo[col-1][current_row+1][0] > memo[col-1][current_row+1][1]:\n current_index = 0\n else:\n current_index = 1\n current_row += 1\n # otherwise it did not pick up\n else:\n current_row -= 1\n current_index = 1\n # If at 0 energy then it must have picked up\n else:\n optimal_solution[col] = 1\n current_row += 1\n if memo[col - 1][1][0] is None:\n current_index = 1\n elif memo[col - 1][1][1] is None:\n current_index = 0\n else:\n if memo[col - 1][1][0] > memo[col - 1][1][1]:\n current_index = 0\n else:\n current_index = 1\n return maxvalue[0], optimal_solution", "def showSolution(bestTree)->list:\r\n bestSon = bestTree\r\n solved = bestTree.value\r\n minDepth = bestTree.depth\r\n solution = []\r\n while bestSon.sons:\r\n #print(bestSon.state)\r\n solution.append(bestSon.state)\r\n bestSon = getBestSon(bestSon, minDepth)\r\n #print(bestSon.state)\r\n solution.append(bestSon.state)\r\n if solved == 1:\r\n #print(\"Minimum necessary total trips:\", bestSon.depth)\r\n solution.append(minDepth)\r\n else:\r\n solution.append(-1)\r\n return solution", "def optimize_swarm(problem, swarm_size=20, max_iterations=100,\n max_velocity=3, acc_const=2, neighbour_count=2,\n desired_value=None, epsilon=None, plotter=None):\n\n # Generate initial particles\n swarm = [Particle(problem, generate_variables(problem))\n for i in range(swarm_size)]\n\n # Set current best particle as global_best\n global_best = copy.deepcopy(min(swarm, key=lambda x: x.best_value))\n\n if plotter:\n plotter.set_up_value_plot(\n \"Local paradigm\" if neighbour_count else \"Global paradigm\")\n plotter.update_plots(swarm, global_best, 0)\n\n # Iterate until maximum iterations are reached\n for iteration in range(max_iterations):\n for i in range(len(swarm)):\n if neighbour_count:\n # Pick best from neighbour particles\n idx = range(\n i - int(neighbour_count / 2),\n i + int(neighbour_count / 2) + 1)\n neighbours = [swarm[j % len(swarm)] for j in idx]\n social_best = min(neighbours, key=lambda x: x.best_value)\n else:\n # Otherwise use global best for updating the\n # particle\n social_best = global_best\n swarm[i].update(social_best)\n\n if swarm[i].best_value < global_best.best_value:\n global_best = copy.deepcopy(swarm[i])\n\n if plotter:\n plotter.update_plots(swarm, global_best, iteration)\n\n if desired_value and epsilon:\n if global_best.best_value < desired_value + epsilon:\n break\n\n return global_best, iteration", "def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop 
through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n occupancy_map.append(current_occupancy_map)\r\n return occupancy_map", "def best(self, protocol_name: str, subset: str = \"development\"):\n\n train_dir = Path(\n self.TRAIN_DIR.format(\n experiment_dir=self.experiment_dir,\n protocol=protocol_name,\n subset=subset,\n )\n )\n\n study_name = \"default\"\n optimizer = Optimizer(\n self.pipeline_, db=train_dir / \"trials.journal\", study_name=study_name\n )\n\n try:\n best_loss = optimizer.best_loss\n except ValueError as e:\n print(\"Still waiting for at least one iteration to succeed.\")\n return\n\n best_params = optimizer.best_params\n\n print(f\"Loss = {100 * best_loss:g}% with the following hyper-parameters:\")\n\n content = yaml.dump(best_params, default_flow_style=False)\n print(content)", "def optimal_neighbors(self, graph, controllers : list) -> (list, int):\n\t\t# This isn't efficient and does not take advantage of other variables in the class\n\t\t# TODO: Optimize to use cluster_info\n\t\tclusters = nx.get_node_attributes(graph, 'cluster')\n\t\tneighbors_list = []\n\t\tfor i in controllers:\n\t\t\tcluster = []\n\t\t\tcluster.append(i)\n\t\t\tneighbors = graph.neighbors(i)\n\t\t\tfor neighbor in neighbors:\n\t\t\t\tif(clusters[neighbor] == clusters[i]):\n\t\t\t\t\tcluster.append(neighbor)\n\t\t\tneighbors_list.append(cluster)\n\t\tprint(neighbors_list)\n\t\t# Find best controller set from neighbors\n\t\tcombinations = list(itertools.product(*neighbors_list))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)", "def solve(self, **kwargs):\n\n sorted_compute_nodes = sorted(\n self.physical.compute_nodes,\n key=lambda x: self.physical.cores(x) * 
1000 + self.physical.memory(x),\n reverse=True,\n )\n\n for n_partitions_to_try in range(\n self.lower_bound(), len(sorted_compute_nodes) + 1\n ):\n\n # partitioning of virtual nodes in n_partitions_to_try partitions\n k_partition = get_partitions(\n self.virtual.g, n_partitions=n_partitions_to_try\n )\n\n # subset of hosts of size n_partitions_to_try\n chosen_physical = sorted_compute_nodes[:n_partitions_to_try]\n\n #\n # check if the partitioning is a feasible solution\n #\n try:\n # virtual nodes to physical nodes assignment\n res_node_mapping = {}\n\n # iterate over each pair (physical_node i, virtual nodes assigned to i)\n for physical_node, assigned_virtual_nodes in zip(\n chosen_physical, k_partition\n ):\n # keep track of the node physical resources used\n cores_used = memory_used = 0\n # check if node resources are not exceeded:\n for virtual_node in assigned_virtual_nodes:\n # cpu cores\n cores_used += self.virtual.req_cores(virtual_node)\n if self.physical.cores(physical_node) < cores_used:\n raise NodeResourceError(physical_node, \"cpu cores\")\n # memory\n memory_used += self.virtual.req_memory(virtual_node)\n if self.physical.memory(physical_node) < memory_used:\n raise NodeResourceError(physical_node, \"memory\")\n # assign the virtual nodes to a physical node\n res_node_mapping[virtual_node] = physical_node\n\n #\n # virtual links to physical links assignment\n #\n res_link_mapping = {}\n rate_used = defaultdict(int)\n\n # iterate over each virtual link between two virtual nodes not mapped on the same physical machine\n for (u, v) in (\n (u, v)\n for (u, v) in self.virtual.sorted_edges()\n if res_node_mapping[u] != res_node_mapping[v]\n ):\n\n res_link_mapping[(u, v)] = []\n\n # physical nodes on which u and v have been placed\n phy_u, phy_v = res_node_mapping[u], res_node_mapping[v]\n\n # for each link in the physical path\n for (i, j, device_id) in self.physical.find_path(\n phy_u,\n phy_v,\n req_rate=self.virtual.req_rate(u, v),\n used_rate=rate_used,\n ):\n # else update the rate\n rate_used[(i, j, device_id)] += self.virtual.req_rate(u, v)\n\n res_link_mapping[(u, v)].append((i, device_id, j))\n\n # build solution from the output\n self.solution = Solution.build_solution(\n self.virtual, self.physical, res_node_mapping, res_link_mapping\n )\n self.status = Solved\n return Solved\n\n except (NodeResourceError, NoPathFoundError):\n # unfeasible, increase the number of partitions to be used\n pass\n else:\n self.status = Infeasible\n return Infeasible", "def evalPart(origTopo, capacities, subTopos):\r\n # print \"\\t===== Evaluate Partitioning =====\"\r\n numTopos = len(subTopos)\r\n numPM = len(capacities)\r\n if (numTopos > numPM):\r\n logger.error(\"Number of sub topologies does not match number of PMs\")\r\n exit()\r\n \r\n weights = {x:0 for x in range(numPM)}\r\n cutWeights = {x:0 for x in range(numPM)}\r\n subLinks = list(itertools.chain(*[subTopos[x].links(sort=True) for x in range(numTopos)]))\r\n cuts = [x for x in origTopo.links(sort=True) if x not in subLinks]\r\n\r\n for i in range(numTopos):\r\n weights[i] = calcTopoWeight(subTopos[i])\r\n cutWeights[i] = 0\r\n\r\n # for i in range(numTopos):\r\n # weights[i] = 0.0\r\n # for link in subTopos[i].links():\r\n # if origTopo.isSwitch(link[0]) and origTopo.isSwitch(link[1]):\r\n # weights[i] = weights[i] + subTopos[i].linkInfo(link[0], link[1])[\"bw\"]\r\n \r\n for link in cuts:\r\n for i in range(numTopos):\r\n if link[0] in subTopos[i].switches() or link[1] in subTopos[i].switches():\r\n weights[i] = weights[i] 
+ origTopo.linkInfo(link[0], link[1])[\"bw\"]\r\n cutWeights[i] = cutWeights[i] + origTopo.linkInfo(link[0], link[1])[\"bw\"]\r\n\r\n return [weights, cutWeights]\r\n # return sorted(weights.values(), reverse=True)\r\n # wSum = sum(weights.values())\r\n # print \"\\tPart\\tCap\\tWeight\\tFraction\"\r\n # for x in range(numPM):\r\n # print \"\\t%d\\t%.1f\\t%.1f\\t%.4f\" % (x, capacities[x], weights[x], weights[x]/wSum)\r", "def _generate_solution(self):\n \n operation_list = []\n available = {job.get_job_id(): [operation for operation in job.get_operations() if operation.get_sequence() == 0] for job in self.jssp_instance_data.jobs} # dictionary of first unprocessed operations of each job\n \n while 0 < len(available):\n rand_job_id = random.choice(list(available.keys()))\n rand_operation = random.choice(available[rand_job_id])\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n\n available[rand_job_id].remove(rand_operation)\n \n if len(available[rand_job_id]) == 0:\n # if selected operation is last operation of the job \n if rand_operation.get_sequence() == self.jssp_instance_data.get_job(rand_job_id).get_max_sequence():\n del available[rand_job_id]\n else:\n available[rand_job_id] = [t for t in self.jssp_instance_data.get_job(rand_job_id).get_operations() if\n t.get_sequence() == rand_operation.get_sequence() + 1]\n\n\n operation_list.append([rand_job_id, rand_operation.get_operation_id(), rand_operation.get_sequence(), rand_machine]) # chromosome representation \n return Solution(self.jssp_instance_data, np.array(operation_list, dtype=np.intc))", "def get_params(bestP, stepsize, params):\n j = 0\n allParams = np.zeros(len(stepsize))\n for i in np.arange(len(stepsize)):\n if stepsize[i] > 0.0:\n allParams[i] = bestP[j]\n j +=1\n else:\n allParams[i] = params[i]\n\n # Loop again to fill in where we have negative step size\n for i in np.arange(len(stepsize)):\n if stepsize[i] < 0.0:\n allParams[i] = allParams[int(-stepsize[i]-1)]\n \n return allParams", "def _get_state_sizes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n \n islot = 0\n parent_oslot_pair = 0\n oslot = 0\n an_ishape = an_inode.oshapes[self.node_list_tuples[islot][parent_oslot_pair][oslot]]\n \n return [[an_ishape[-1]]]", "def compute_score_fast(verbose=1):\n res = []\n\n batch = math.ceil(len(train) / LINEAR_ASSIGNMENT_SEGMENT_SIZE)\n for start in range(0, len(train), batch):\n end = min(len(train), start + batch)\n train_batch = train[start:end]\n\n features = branch_model.predict_generator(FeatureGen(train_batch, verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = head_model.predict_generator(ScoreGen(features, verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = score_reshape(score, features)\n\n res.append(score)\n\n return res", "def get_blocks(index):\r\n #call with -1 to get full blocklist\r\n #the reason this is a function instead of just a list is that originally\r\n #i had plans to support dynamic tilesets, for example if only a certain\r\n #number of each tile were available. 
in the end this didnt happen though\r\n all_blocks = [\r\n [[0,0,0],[1,1,1],[0,0,0]], #0 - (horizontal passage)\r\n [[0,1,0],[0,1,0],[0,1,0]], #1 | (vertical passage)\r\n \r\n [[0,0,0],[1,1,0],[0,1,0]], #2 >v various L-junctions\r\n [[0,1,0],[1,1,0],[0,0,0]], #3 >^\r\n [[0,0,0],[0,1,1],[0,1,0]], #4 ^>\r\n [[0,1,0],[0,1,1],[0,0,0]], #5 v>\r\n \r\n [[0,0,0],[0,0,0],[0,0,0]], #6 0 empty\r\n [[0,1,0],[1,1,1],[0,1,0]], #7 + cross\r\n \r\n [[0,1,0],[1,1,1],[0,0,0]], #8 _|_ various T-junctions\r\n [[0,0,0],[1,1,1],[0,1,0]], #9 T\r\n [[0,1,0],[1,1,0],[0,1,0]], #10 -|\r\n [[0,0,0],[1,1,1],[0,0,0]]] #11 |-\r\n \r\n# [[0,1,0],[0,1,0],[0,0,0]], #12 #unsued \"dead end\" pieces\r\n# [[0,0,0],[0,1,0],[0,1,0]], #13\r\n# [[0,0,0],[0,1,1],[0,0,0]], #14\r\n# [[0,0,0],[1,1,0],[0,0,0]] ]#15\r\n if index == -1:\r\n return all_blocks\r\n else:\r\n return all_blocks[index]", "def ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n start_time = time.time()\n # unpickle model and build Kratos Model object\n serialized_model = pickle.loads(pickled_coarse_model)\n current_model = KratosMultiphysics.Model()\n serialized_model.Load(\"ModelSerialization\",current_model)\n del(serialized_model)\n # unpickle parameters and build Kratos Parameters object\n serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n # refine if current current_global_index > 0, adaptive refinement based on the solution of previous index\n if (current_index > 0):\n # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n # refine the model Kratos object\n adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n current_model = refined_model\n del(refined_model,refined_project_parameters)\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index\n if (current_index == 
current_global_index):\n simulation.is_current_index_maximum_index = True\n else:\n simulation.is_current_index_maximum_index = False\n # mapping if in current finest level and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True and current_index == current_global_index):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,pickled_finer_model,computational_time", "def __stage2(self, img, total_boxes, stage_status: StageStatus):\r\n\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, stage_status\r\n\r\n # second stage\r\n tempimg = np.zeros(shape=(24, 24, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))\r\n\r\n tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \\\r\n img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)\r\n\r\n else:\r\n return np.empty(shape=(0,)), stage_status\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._rnet.run(tempimg1)\r\n\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n\r\n score = out1[1, :]\r\n\r\n ipass = np.where(score > self._steps_threshold[1])\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n if total_boxes.shape[0] > 0:\r\n pick = self.__nms(total_boxes, 0.7, 'Union')\r\n total_boxes = total_boxes[pick, :]\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n return total_boxes, stage_status", "def search_minimum_coloring(self,alpha,Beta):\n bestSol=[]\n bestK=0\n k= self.g.n\n iter = 0\n global encore\n encore = True\n timer = threading.Timer(200, findeboucle)\n timer.start()\n while(encore):\n tabus_search = self.compute_solution(k,alpha,Beta)\n if(tabus_search[1]==0):\n 
bestSol= copyMatrix(tabus_search[0])\n #tmax=tabus_search[2]\n bestK=k\n k=k-1\n return(bestK,bestSol)", "def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]", "def _form_computation_graph(self, idx):\n _list, _set = list, set\n if type(idx) is int:\n node_layers = [np.array([idx], dtype=np.int64)]\n elif type(idx) is list:\n node_layers = [np.array(idx, dtype=np.int64)]\n\n for _ in range(self.n_layers):\n prev = node_layers[-1]\n arr = [node for node in prev]\n arr.extend([e[0] for node in arr for e in self.nbrs_t[node]])\n arr = np.array(_list(_set(arr)), dtype=np.int64)\n node_layers.append(arr)\n node_layers.reverse()\n\n mappings = [{j: i for (i, j) in enumerate(arr)} for arr in node_layers]\n\n return node_layers, mappings", "def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = (height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model", "def standard_optimize_phases():\n return [\n annotate_downstream_side_inputs,\n annotate_stateful_dofns_as_roots,\n fix_side_input_pcoll_coders,\n pack_combiners,\n lift_combiners,\n expand_sdf,\n fix_flatten_coders,\n # sink_flattens,\n greedily_fuse,\n read_to_impulse,\n extract_impulse_stages,\n remove_data_plane_ops,\n sort_stages,\n ]", "def best_last_option(self):\n \n # 
get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get all possible blocks to make a move in\n winning_blocks = board.get_winning_blocks(affinity)\n print('total winning blocks:'+str(len(winning_blocks)))\n best_blocks = []\n best_block = None\n\n # find the largest blocks to place a stone in\n for block in winning_blocks:\n if affinity == BLUE_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n elif len(block.blue) > len(best_blocks[0].blue):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.blue) == len(best_blocks[0].blue):\n best_blocks.append(block)\n elif affinity ==RED_TILE():\n if len(best_blocks) == 0: best_blocks.append(block)\n if len(block.red) > len(best_blocks[0].red):\n best_blocks = []\n best_blocks.append(block)\n elif len(block.red) == len(best_blocks[0].red):\n best_blocks.append(block)\n\n # find the best block to place a stone in\n for block in best_blocks:\n if best_block is None: best_block = block \n elif block.tiles[0][0] <= best_block.tiles[0][0]: \n if (block.tiles[0][1] != block.tiles[1][1]):\n if block.direction == 'vertical':\n if block.tiles[WINNING_ROW_SIZE()-1][1] >= best_block.tiles[WINNING_ROW_SIZE()-1][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block\n else:\n if block.tiles[0][1] >= best_block.tiles[0][1]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n else:\n if block.tiles[0][1] >= best_block.tiles[0][1] and block.tiles[1][0] <= best_block.tiles[1][0]:\n if affinity == RED_TILE(): \n if len(block.red) >= len(best_block.red):\n print('considered block:'+str(block.tiles))\n best_block = block \n if affinity == BLUE_TILE(): \n if len(block.blue) >= len(best_block.blue):\n print('considered block:'+str(block.tiles))\n best_block = block \n\n # find the best move to make out of the best block \n # print('best block:'+str(best_block.tiles))\n best_move = (7,-1)\n for tile_i in range(len(best_block.tiles)):\n tile = best_block.tiles[tile_i]\n next_tile = None\n prev_tile = None \n if tile_i+1 in range(len(best_block.tiles)):\n next_tile = best_block.tiles[tile_i+1]\n if tile_i-1 in range(len(best_block.tiles)):\n prev_tile = best_block.tiles[tile_i-1]\n if board.get_tile(tile[0],tile[1]) == BLANK_TILE():\n if prev_tile is not None and next_tile is None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is None:\n if board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n elif next_tile is not None and prev_tile is not None:\n if board.get_tile(prev_tile[0],prev_tile[1]) == affinity or \\\n board.get_tile(next_tile[0],next_tile[1]) == affinity:\n if tile[0] <= best_move[0]: \n if tile[1] >= tile[1]:\n best_move = tile \n \n return best_move", "def 
solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def search(self, is_max, possible_moves, state, depth, alpha, beta):\n temp_state = state.deepcopy()\n best_move = None\n best_move_val = float('-inf') if is_max else float('inf')\n \n for move in possible_moves:\n for to in move['to']:\n \n if time() > self.thinking_time:\n return best_move, best_move_val\n \n temp_state.board.move_pawn(move['from'], to)\n temp_state.next_turn()\n _, val = self.minimax(temp_state, not(is_max), depth+1, alpha, beta)\n \n temp_state.board.move_pawn(to, move['from'])\n temp_state.undo_turn()\n \n if is_max and val > best_move_val:\n alpha = max(val, alpha)\n best_move_val = val\n best_move = (move['from'], to)\n \n if not(is_max) and val < best_move_val:\n beta = min(val, beta)\n best_move_val = val\n best_move = (move['from'], to)\n \n if beta <= alpha: #pruning\n return best_move, best_move_val\n \n return best_move, best_move_val", "def big_fun_search(game, grid_size, pokemon_locations, index):\n queue = [index]\n discovered = [index]\n visible = []\n\n if game[index] == FLAG:\n \treturn queue\n\n number = number_at_cell(game, pokemon_locations, grid_size, index)\n if number != 0:\n return queue\n\n while queue:\n node = queue.pop()\n for neighbour in neighbour_directions(node, grid_size):\n if neighbour in discovered or neighbour is None:\n continue\n\n discovered.append(neighbour)\n if game[neighbour] != FLAG:\n number = number_at_cell(game, pokemon_locations, grid_size, neighbour)\n if number == 0:\n queue.append(neighbour)\n visible.append(neighbour)\n return visible", "def get_topology(init_height, map_size, max_height):\n num_features = math.ceil(map_size[0] / FEATURE_SIZE)\n generators = [create_valley, create_hill, create_plateau]\n previous = [random.randrange(len(generators)), random.randrange(len(generators))]\n feature_points = []\n for i in range(num_features):\n while True:\n idx = random.randrange(len(generators))\n # do not repeat topology more than once\n if previous.count(idx) != 2:\n break\n new_points = generators[idx](map_size[1], init_height, max_height, FEATURE_SIZE)\n for idp in range(len(new_points)):\n # as the feature points are generated in local coordinates, shift them on the x axis to the correct part\n # of the terrain.\n new_points[idp] = (new_points[idp][0] + i * FEATURE_SIZE, new_points[idp][1])\n feature_points.extend(new_points)\n previous.pop(0)\n previous.append(idx)\n\n return feature_points", "def bestalg(self, dimfun):\n if self._bestalg is None:\n self._bestalg = bb.bestalg.generate(self.algds)\n return self._bestalg[dimfun] if dimfun is not None else self._bestalg", "def optimal_min(board):\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is positive infinity\n global_optimum = [None, math.inf]\n\n for action in available_actions:\n # Anticipates optimal adversarial moves.\n 
local_optimum = optimal_max(result(board, action))\n\n if global_optimum[1] >= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # For this problem we will be reusing the majority of our work from question 2, but we will be\n # implementing alpha-beta pruning on top of our existing minimax infrastructure\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n\n def maximizer(curState, agentIndex, alpha, beta, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight > beta:\n return weight\n if alpha < weight:\n alpha = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, alpha, beta, depth):\n ghostActions = curState.getLegalActions(agentIndex)\n weight = 999999999 # Worst case starting value to be changed in the code\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, alpha, beta, depth+1)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n endWeight = -999999999\n alpha = -999999999\n beta = 999999999\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n endWeight = minimizer(tempState, 1, alpha, beta, 0,)\n count[x] = endWeight\n if alpha < endWeight:\n alpha = endWeight\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, 
depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n 
selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"", "def find_best_solution_and_score(self):\r\n best_score = MAXSIZE\r\n best_solution = self.simulation.solutions[0]\r\n for solution in self.simulation.solutions:\r\n score = self.simulation.fitting_function.fit_score(solution)\r\n if score < best_score:\r\n best_score = score\r\n best_solution = solution\r\n return best_solution, best_score", "def get_solution(data, manager, routing, solution):\n route_list = {}\n max_route_distance = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n route_list[vehicle_id] = []\n route_distance = 0\n while not routing.IsEnd(index):\n route_list[vehicle_id].append(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n route_list[vehicle_id].append(manager.IndexToNode(index))\n #_set_route_distance(vehicle_id, route_distance, route_list)\n return route_list", "def get_candidate_layouts(neighbors_by_tile, shape):\n candidate_layouts = []\n for tile_id, tile in neighbors_by_tile.items():\n candidates_for_root = generate_candidates_for_root(shape, neighbors_by_tile)\n candidate_layouts.extend(candidates_for_root)\n return candidate_layouts", "def compute_heuristic(self, state):\n if self._shape_reward_mode == \"optimal\":\n problem = self.problems[self._problem_idx]\n\n # Add action literals to state to enable planning\n state_lits = set(state.literals)\n action_lits = set(\n self.action_space.all_ground_literals(state, valid_only=False))\n state_lits |= action_lits\n\n problem_path = \"\"\n try:\n # generate a temporary file to hand over to the external planner\n fd, problem_path = tempfile.mkstemp(dir=TMP_PDDL_DIR, text=True)\n with os.fdopen(fd, \"w\") as f:\n problem.write(f, initial_state=state_lits, fast_downward_order=True)\n\n return get_fd_optimal_plan_cost(\n self.domain.domain_fname, problem_path)\n finally:\n try:\n os.remove(problem_path)\n except FileNotFoundError:\n pass\n else:\n return self._heuristic(state)", "def calculateOptimal(self) -> (list, int):\n\t\tcombinations = list(itertools.product(*self.clusters))\n\t\tmin_dist = 1000000\n\t\tmin_combination = None\n\t\tfor combination in combinations:\n\t\t\tdist = super().step(combination)\n\t\t\tif(dist < min_dist):\n\t\t\t\tmin_dist = dist\n\t\t\t\tmin_combination = combination\n\t\treturn (min_combination, min_dist)", "def get_sub_combinations(maxop):\n combo = collections.defaultdict(list)\n for numops in range(maxop+1):\n if numops:\n combo[numops, 1].append((numops-1,))\n for op1 in range(numops):\n combo[numops, 2].append((op1, numops - op1 - 1))\n for op2 in range(numops - op1):\n combo[numops, 3].append((op1, op2, numops - op1 - op2 - 1))\n return combo", "def minimaxLocalSearch(gamestate, depth, timeTotal, alpha, beta, maxEntity):\n bonus = 0\n isTerminalState = gamestate.board.checkTerminalState(gamestate.currentPlayer.noPlayer)\n # Basis Rekursif\n if ((depth == 0) or (time.time() > timeTotal) or (isTerminalState)):\n if (isTerminalState) and (gamestate.currentPlayer.noPlayer == maxEntity):\n bonus = 10\n elif (isTerminalState) and (gamestate.currentPlayer.noPlayer != maxEntity):\n bonus = -10\n return gamestate, 
U_Function(gamestate.currentPlayer, gamestate.oppositePlayer, gamestate.board.size, maxEntity) + bonus\n\n # Rekurens\n if (gamestate.currentPlayer.noPlayer == maxEntity):\n # Choose the maximum utility of the state\n # Iterate all pion and its possible moves\n maxGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n maxValue = -math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Choose the best move from local search heuristic\n if (len(all_possible_moves) > 0):\n move = getBestMove(all_possible_moves, gamestate)\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n \n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimaxLocalSearch(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old max value\n if (utility > maxValue):\n maxValue = utility\n maxGameState = newGameState\n \n alpha = max(alpha, maxValue)\n if (beta <= alpha):\n return maxGameState, maxValue\n\n return maxGameState, maxValue\n\n else:\n # Choose the minimum utility of the state\n minGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n minValue = math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n if (len(all_possible_moves) > 0):\n # Choose the best move from local search heuristic\n move = getBestMove(all_possible_moves, gamestate)\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimaxLocalSearch(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old min value\n if (utility < minValue):\n minValue = utility\n minGameState = newGameState\n \n beta = min(beta, minValue)\n if (beta <= alpha):\n return minGameState, minValue\n \n return minGameState, minValue", "def _get_sub_pgs(self, tensor_parallel_size: int):\n tp_ranks: List[List[int]] = []\n fsdp_ranks: List[List[int]] = []\n for rank in range(self.world_size):\n tp_idx = rank // tensor_parallel_size\n if len(tp_ranks) <= tp_idx:\n tp_ranks.append([])\n tp_ranks[tp_idx].append(rank)\n fsdp_idx = rank % tensor_parallel_size\n if len(fsdp_ranks) <= fsdp_idx:\n fsdp_ranks.append([])\n fsdp_ranks[fsdp_idx].append(rank)\n tp_pgs = [dist.new_group(ranks) for ranks in tp_ranks]\n fsdp_pgs = [dist.new_group(ranks) for ranks in fsdp_ranks]\n tp_pg = tp_pgs[self.rank // tensor_parallel_size]\n fsdp_pg = fsdp_pgs[self.rank % tensor_parallel_size]\n return tp_pg, fsdp_pg", "def recursive_minimax(game: Any) -> Any:\n moves_scores = []\n moves = game.current_state.get_possible_moves()\n\n for move in moves:\n g = copy.deepcopy(game)\n s = g.current_state.make_move(move)\n g.current_state = s\n\n moves_scores.append(-1*recursive_helper(g))\n return moves[moves_scores.index(max(moves_scores))]", "def 
part1_2(puzzle_input):\n [initial_state_string, configurations] = puzzle_input.split('\\n\\n')\n initial_state = re.sub('initial state: ', '', initial_state_string)\n rules_arr = configurations.split('\\n')\n rules = [re.split(' => ', line) for line in rules_arr]\n rules = {t[0]: t[1] for t in rules}\n current_state = '..........' + initial_state + '...............................................................................................................................................'\n for i in range(100): # After 100th cycle, the only change is that there is a '#' that shifts right\n next_generation_string = \"\"\n for index, pot in enumerate(current_state):\n if index == 0:\n temp_string = '..' + current_state[:3]\n elif index == 1:\n temp_string = '.' + current_state[:4]\n elif index == len(current_state) - 2:\n temp_string = current_state[-4:] + '.'\n elif index == len(current_state) - 1:\n temp_string = current_state[-3:] + '..'\n else:\n temp_string = current_state[index-2:index+3]\n if temp_string in rules:\n next_generation_string += rules[temp_string]\n else:\n next_generation_string += pot\n current_state = next_generation_string\n\n # For part 1\n part1_sum = 0\n if i == 19:\n for index, pot in enumerate(current_state):\n if pot == '#':\n part1_sum += index - 10\n print(part1_sum)\n\n # Part 2\n part2_sum = 0\n for index, pot in enumerate(current_state):\n if pot == '#':\n part2_sum += index - 10 + 50000000000 - 100\n print(part2_sum)", "def split(self, elIndicesDict, maxSubTreeSize=None, numSubTrees=None, verbosity=0):\n #dbList = self.generate_circuit_list()\n tm = _time.time()\n printer = _VerbosityPrinter.build_printer(verbosity)\n\n if (maxSubTreeSize is None and numSubTrees is None) or \\\n (maxSubTreeSize is not None and numSubTrees is not None):\n raise ValueError(\"Specify *either* maxSubTreeSize or numSubTrees\")\n if numSubTrees is not None and numSubTrees <= 0:\n raise ValueError(\"EvalTree split() error: numSubTrees must be > 0!\")\n\n #Don't split at all if it's unnecessary\n if maxSubTreeSize is None or len(self) < maxSubTreeSize:\n if numSubTrees is None or numSubTrees == 1: return elIndicesDict\n\n self.subTrees = []\n evalOrder = self.get_evaluation_order()\n printer.log(\"EvalTree.split done initial prep in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n\n def nocache_create_equal_size_subtrees():\n \"\"\" A shortcut for special case when there is no cache so each\n circuit can be evaluated independently \"\"\"\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost\n\n def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n \"\"\"\n Find a set of subtrees by iterating through the tree\n and placing \"break\" points when the cost of evaluating the\n subtree exceeds some 'maxCost'. 
This ensure ~ equal cost\n trees, but doesn't ensure any particular number of them.\n\n maxCostRate can be set to implement a varying maxCost\n over the course of the iteration.\n \"\"\"\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost\n\n ##################################################################\n # Part I: find a list of where the current tree should be broken #\n ##################################################################\n\n if numSubTrees is not None and self.cache_size() == 0:\n #print(\"Split: EQUAL SUBTREES!\") #REMOVE\n subTreeSetList, totalCost = nocache_create_equal_size_subtrees()\n #printer.log(\"EvalTree.split PT1 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n elif numSubTrees is not None:\n\n #OLD METHOD: optimize max-cost to get the right number of trees\n # (but this can yield trees with unequal lengths or cache sizes,\n # which is what we're often after for memory reasons)\n costMet = \"size\" # cost metric\n if costMet == \"applies\":\n maxCost = self.get_num_applies() / numSubTrees\n else: maxCost = len(self) / numSubTrees\n maxCostLowerBound, maxCostUpperBound = maxCost, None\n maxCostRate, rateLowerBound, rateUpperBound = 0, -1.0, +1.0\n #OLD (& incorrect) vals were 0, -1.0/len(self), +1.0/len(self),\n # though current -1,1 vals are probably overly conservative...\n resultingSubtrees = numSubTrees + 1 # just to prime the loop\n iteration = 0\n\n #Iterate until the desired number of subtrees have been found.\n while resultingSubtrees != numSubTrees:\n subTreeSetList, totalCost = create_subtrees(maxCost, maxCostRate, costMet)\n resultingSubtrees = len(subTreeSetList)\n #print(\"DEBUG: resulting numTrees = %d (cost %g) w/maxCost = %g [%s,%s] & rate = %g [%g,%g]\" % \\\n # (resultingSubtrees, totalCost, maxCost, 
str(maxCostLowerBound), str(maxCostUpperBound),\n # maxCostRate, rateLowerBound, rateUpperBound))\n\n #DEBUG\n #totalSet = set()\n #for s in subTreeSetList:\n # totalSet.update(s)\n #print(\"DB: total set length = \",len(totalSet))\n #assert(len(totalSet) == len(self))\n\n #Perform binary search in maxCost then maxCostRate to find\n # desired final subtree count.\n if maxCostUpperBound is None or abs(maxCostLowerBound - maxCostUpperBound) > 1.0:\n # coarse adjust => vary maxCost\n last_maxCost = maxCost\n if resultingSubtrees <= numSubTrees: # too few trees: reduce maxCost\n maxCost = (maxCost + maxCostLowerBound) / 2.0\n maxCostUpperBound = last_maxCost\n else: # too many trees: raise maxCost\n if maxCostUpperBound is None:\n maxCost = totalCost # / numSubTrees\n else:\n maxCost = (maxCost + maxCostUpperBound) / 2.0\n maxCostLowerBound = last_maxCost\n else:\n # fine adjust => vary maxCostRate\n last_maxRate = maxCostRate\n if resultingSubtrees <= numSubTrees: # too few trees reduce maxCostRate\n maxCostRate = (maxCostRate + rateLowerBound) / 2.0\n rateUpperBound = last_maxRate\n else: # too many trees: increase maxCostRate\n maxCostRate = (maxCostRate + rateUpperBound) / 2.0\n rateLowerBound = last_maxRate\n\n iteration += 1\n assert(iteration < 100), \"Unsuccessful splitting for 100 iterations!\"\n\n else: # maxSubTreeSize is not None\n subTreeSetList, totalCost = create_subtrees(\n maxSubTreeSize, maxCostRate=0, costMetric=\"size\")\n\n ##########################################################\n # Part II: create subtrees from index sets\n ##########################################################\n # (common logic provided by base class up to providing a few helper fns)\n\n def permute_parent_element(perm, el):\n \"\"\"Applies a permutation to an element of the tree \"\"\"\n # perm[oldIndex] = newIndex\n #return (perm[el[0]] if (el[0] is not None) else None, el[1], el[2])\n return (el[0], el[1], el[2]) # no need to permute the cache element ([0])\n\n def create_subtree(parentIndices, numFinal, fullEvalOrder, sliceIntoParentsFinalArray, parentTree):\n \"\"\"\n Creates a subtree given requisite information:\n\n Parameters\n ----------\n parentIndices : list\n The ordered list of (parent-tree) indices to be included in\n the created subtree.\n\n numFinal : int\n The number of \"final\" elements, i.e. 
those that are used to\n construct the final array of results and not just an intermediate.\n The first numFinal elemements of parentIndices are \"final\", and\n 'sliceIntoParentsFinalArray' tells you which final indices of\n the parent they map to.\n\n fullEvalOrder : list\n A list of the integers between 0 and len(parentIndices)-1 which\n gives the evaluation order of the subtree *including* evaluation\n of any initial elements.\n\n sliceIntoParentsFinalArray : slice\n Described above - map between to-be-created subtree's final\n elements and parent-tree indices.\n\n parentTree : EvalTree\n The parent tree itself.\n \"\"\"\n #t0 = _time.time() #REMOVE\n subTree = MapEvalTree()\n subTree.myFinalToParentFinalMap = sliceIntoParentsFinalArray\n subTree.num_final_strs = numFinal\n subTree[:] = [None] * len(parentIndices)\n\n curCacheSize = 0\n subTreeCacheIndices = {}\n\n for ik in fullEvalOrder: # includes any initial indices\n k = parentIndices[ik] # original tree index\n\n oStart, remainder, oCache = self[k] # original tree data\n\n if oCache is not None: # this element was in parent's cache,\n subTreeCacheIndices[oCache] = curCacheSize # maps parent's cache indices to subtree's\n iCache = curCacheSize\n curCacheSize += 1\n else:\n iCache = None\n\n iStart = None if (oStart is None) else \\\n subTreeCacheIndices[oStart]\n subTree.eval_order.append(ik)\n\n assert(subTree[ik] is None)\n subTree[ik] = (iStart, remainder, iCache)\n\n #t1 = _time.time() #REMOVE\n subTree.cachesize = curCacheSize\n subTree.parentIndexMap = parentIndices # parent index of each subtree index\n subTree.simplified_circuit_spamTuples = [self.simplified_circuit_spamTuples[k]\n for k in _slct.indices(subTree.myFinalToParentFinalMap)]\n #subTree._compute_finalStringToEls() #depends on simplified_circuit_spamTuples\n\n #t2 = _time.time() #REMOVE\n final_el_startstops = []; i = 0\n for spamTuples in parentTree.simplified_circuit_spamTuples:\n final_el_startstops.append((i, i + len(spamTuples)))\n i += len(spamTuples)\n #t3 = _time.time() #REMOVE\n if len(_slct.indices(subTree.myFinalToParentFinalMap)) > 0:\n subTree.myFinalElsToParentFinalElsMap = _np.concatenate(\n [_np.arange(*final_el_startstops[k])\n for k in _slct.indices(subTree.myFinalToParentFinalMap)])\n #Note: myFinalToParentFinalMap maps only between *final* elements\n # (which are what is held in simplified_circuit_spamTuples)\n else: # no final elements (a \"dummy\" tree, useful just to keep extra procs busy)\n subTree.myFinalElsToParentFinalElsMap = _np.arange(0, 0) # empty array\n\n #t4 = _time.time() #REMOVE\n subTree.num_final_els = sum([len(v) for v in subTree.simplified_circuit_spamTuples])\n #t5 = _time.time() #REMOVE\n subTree.recompute_spamtuple_indices(bLocal=False)\n #t6 = _time.time() #REMOVE\n\n subTree.trim_nonfinal_els()\n #t7 = _time.time() #REMOVE\n subTree.opLabels = self._get_opLabels(subTree.generate_circuit_list(permute=False))\n #t8 = _time.time() #REMOVE\n # print(\"DB: create_subtree timing: \"\n # \"t1=%.3fs, t2=%.3fs, t3=%.3fs, t4=%.3fs, t5=%.3fs, t6=%.3fs, t7=%.3fs, t8=%.3fs\"\n # % (t1-t0,t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6,t8-t7))\n\n return subTree\n\n #printer.log(\"EvalTree.split PT2 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n updated_elIndices = self._finish_split(elIndicesDict, subTreeSetList,\n permute_parent_element, create_subtree,\n all_final=bool(self.cache_size() == 0))\n #printer.log(\"EvalTree.split PT3 %.1fs\" %\n # (_time.time()-tm)); tm = _time.time() #REMOVE\n\n printer.log(\"EvalTree.split 
done second pass in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n return updated_elIndices", "def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial consistent heuristic\n \n if not state.dirty_locations:\n return 0\n \n best_start = 0 # best dirty tile to start from\n best_cost = INF # cost of the path from the above start tile\n\n for i in range(len(state.dirty_locations)):\n estimate_cost = 0\n lowest_cost = INF\n closest_dirty = 0\n dirty_locations = list(state.dirty_locations)\n current_pos = dirty_locations.pop(i)\n\n # find the shortest cost solution path from this starting tile\n while dirty_locations:\n for j in range(len(dirty_locations)):\n manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col)\n if manhattan < lowest_cost:\n lowest_cost = manhattan\n closest_dirty = j\n estimate_cost += lowest_cost\n current_pos = dirty_locations.pop(closest_dirty)\n lowest_cost = INF\n # if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start\n if estimate_cost < best_cost:\n best_cost = estimate_cost\n best_start = i\n # if estimated path cost and best path cost so far are equal, tiebreak with proximity to start tile\n if estimate_cost == best_cost:\n current_pos = state.position\n dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)\n dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col)\n if dist_to_i < dist_to_prev_best:\n best_start = i\n \n\n current_pos = state.position\n # Calculate distance to the best start tile\n dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)\n # Returned heuristic is the sum of distance to the start tile and estimated cost from said tile\n return dist_to_start + best_cost", "def __find_best_split(self, x, y):\n data = np.transpose(np.vstack((np.transpose(x), y)))\n num_features = data.shape[1] - 1\n\n # initialise splitting rule components\n integer_splitting_rule = None\n feature_index_to_split = None\n max_info_gain = 0\n\n # iterate over all the features and find best splits within these\n for feature in range(num_features):\n info_gain, split_int = self.__find_best_split_in_feature(\n data[:, [feature, -1]])\n if info_gain is None:\n continue\n # update max info gain so far as it iterates over features\n if info_gain > max_info_gain:\n max_info_gain = info_gain\n feature_index_to_split = feature\n integer_splitting_rule = int(split_int)\n\n return feature_index_to_split, integer_splitting_rule", "def run(self):\n best_score = float('inf')\n best_route = None\n best_nr_iterations = None\n best_tabu_list_size = None\n for i in range(self.range_iterations_start, self.range_iterations_end, 10):\n for j in range(self.range_tabu_list_start, self.range_tabu_list_end):\n print('testing for nr_iterations', i, ' and tabu list size', j)\n self.hc.generate_initial_solution(use_seed=True)\n score, route, iteration = self.hc.solve(tabu=self.tabu, with_time_windows=self.with_time_windows,\n nr_iterations=i, tabu_size=j,\n allow_infeasibilites=self.allow_infeasibilites)\n\n if score < best_score:\n best_score = score\n best_route = route\n best_nr_iterations = i\n best_tabu_list_size = j\n\n print('best results with sore', best_score, best_nr_iterations, best_tabu_list_size )\n return best_score, 
best_route, best_tabu_list_size", "def _minimize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def getMiniBatch(self, batch_size):\r\n indices = self.get_indices(batch_size)\r\n states, actions, rewards, next_states, is_done_vec = [], [], [], [], []\r\n for ER_i in self.ER_array:\r\n agent_experiences = [ER_i.buffer[index] for index in indices] # extract experiences\r\n # ---- organize the experiences so that the experiences of each agent are aligned ----\r\n states_i, actions_i, rewards_i, next_states_i, is_done_vec_i = [], [], [], [], []\r\n for experience in agent_experiences:\r\n states_i.append(experience.state)\r\n actions_i.append(experience.action)\r\n rewards_i.append(experience.reward)\r\n next_states_i.append(experience.next_state)\r\n is_done_vec_i.append(experience.done)\r\n states.append(states_i)\r\n actions.append(actions_i)\r\n rewards.append(rewards_i)\r\n next_states.append(next_states_i)\r\n is_done_vec.append(is_done_vec_i)\r\n return states, actions, rewards, next_states, is_done_vec", "def spit(self):\n idxs = np.arange(self.total_tuples)\n return [self.recurse_index_state(copy.deepcopy(self.state_buffer), idxs), self.recurse_index_state(copy.deepcopy(self.state2_buffer), idxs), self.action_buffer[idxs], self.adv_buffer[idxs], \n self.rtg_buffer[idxs], self.logp_buffer[idxs], self.valid_actions_buffer[idxs], self.rew_buffer[idxs], self.done_buffer[idxs]]", "def optimal_max(board):\n # Board full?\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is negative infinity\n global_optimum = [None, -math.inf]\n\n # For each move, what would opponent do next? Update best move.\n for action in available_actions:\n # Anticipates optimal adversarial moves\n local_optimum = optimal_min(result(board, action))\n\n # Compares local vs global optima\n if global_optimum[1] <= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def extract_mesh(self,criteria):\n assert(isinstance(criteria,list))\n # super class private cannot be accessed directly\n n = Mesh(self.get_precision())\n s_elem=set()\n s_node=set()\n s_nrml=set()\n # Gather all nrmls,nodes and elems from critia\n for e in self.elems:\n if self.elems[e][POS.TAG] in criteria:\n s_elem.add(e)\n s_node=s_node.union(set(self.elems[e][POS.NODLIST]))\n s_nrml=s_nrml.union(set(self.elems[e][POS.NRMLIST]))\n\n s_node = sorted(list(s_node))\n renum_node = {}\n for i in range(len(s_node)):\n n.nodes[i+1] = self.nodes[s_node[i]]\n renum_node[s_node[i]]=i+1\n # ============================ \n s_nrml = sorted(list(s_nrml))\n renum_nrml={}\n for i in range(len(s_nrml)):\n info = list(self.nrmls[s_nrml[i]])\n info[0] = renum_node[info[0]]\n n.nrmls[i+1] = info\n renum_nrml[s_nrml[i]]=i+1\n #\n s_elem = sorted(list(s_elem))\n for i in range(len(s_elem)):\n info = self.elems[s_elem[i]]\n nodelist = list(info[2])\n nrmlist = list(info[3])\n for j in range(info[1]):\n nodelist[j] = renum_node[nodelist[j]]\n nrmlist[j] = renum_nrml[nrmlist[j]]\n n.elems[i+1] = ['extract',info[1],tuple(nodelist),tuple(nrmlist)]\n\n n._rebuild_rev_info()\n n._recreate_avail_info()\n\n return n", "def get_optimal_move(self):\n # create the root state\n root = State(self.current_board, True, self.__machine_token, self.__human_token)\n # alpha-beta-pruning algorithm\n best_move = max_value_a_b(root, depth(root), -1000, 1000)\n # obtain the direct children.\n direct_children = get_direct_children(root, 
all_states_generated)\n # obtain the coordinates of the movement.\n for direct_child in direct_children:\n if direct_child.value == best_move:\n return get_coordinates(root, direct_child)", "def select_best(src_dirs, dst_dir, op_name):\n\n def _copy_file(src_path, dst_path):\n try:\n if os.path.isfile(dst_path):\n os.remove(dst_path)\n except OSError:\n pass\n\n try:\n shutil.copy(src_path, dst_path)\n except PermissionError:\n # If dst_path already exits and only has READ permission\n pass\n\n max_block_dim = 1\n max_block_dim_idx = -1\n for i, src_dir in enumerate(src_dirs):\n o_path = os.path.join(src_dir, op_name + \".o\")\n json_path = os.path.join(src_dir, op_name + \".json\")\n if os.path.isfile(o_path) and os.path.isfile(json_path):\n with open(json_path, 'r') as f:\n json_str = f.read()\n json_dict = json.loads(json_str)\n if json_dict[\"blockDim\"] >= max_block_dim:\n max_block_dim_idx = i\n max_block_dim = json_dict[\"blockDim\"]\n if max_block_dim_idx >= 0:\n o_path = os.path.join(src_dirs[max_block_dim_idx], op_name + \".o\")\n json_path = os.path.join(src_dirs[max_block_dim_idx], op_name + \".json\")\n _copy_file(o_path, os.path.join(dst_dir, op_name + \".o\"))\n _copy_file(json_path, os.path.join(dst_dir, op_name + \".json\"))\n logger.info(\"{}, best compile result dir: {}\".format(op_name, src_dirs[max_block_dim_idx]))\n return True\n logger.info(\"{}, best compile result dir not found\".format(op_name))\n return False", "def best_group(opt_result, verts):\n\ta, b = [], []\n\tfor i in xrange(len(opt_result)):\n\t\tif opt_result[i] == -1:\n\t\t\ta.append(verts[i])\n\t\telse:\n\t\t\tb.append(verts[i])\n\treturn a, b", "def greedy(constraint, indexes, m_l, parallel=False):\n\n selected = np.array([])\n plot = False\n choices = np.array(indexes)\n bar = ChargingBar(\"Calculating index set with greedy method\", max=m_l)\n\n for i in range(len(selected), m_l):\n # print(\"i = %d\" % i)\n start = time.time()\n\n def calc(node):\n return constraint(np.union1d(selected, node))\n\n if parallel:\n pickle_fix.calc = calc\n available_cores = odin.config.get(\"available_cores\", 4)\n pool = multiprocessing.Pool(processes=available_cores)\n values = pool.map(pickle_fix, choices)\n pool.close()\n else:\n # values: [float]\n values = list(map(calc, choices))\n\n greedy_choice = choices[np.argmax(values)]\n\n if plot:\n values = np.sort(values)\n oplt.plot(values)\n oplt.show()\n # current_best = np.max(values)\n\n selected = np.union1d(selected, [greedy_choice])\n choices = np.setdiff1d(choices, [greedy_choice])\n logging.debug(\"selected = %s; choice = %s; time = %.5f\" % (\n selected, greedy_choice, time.time() - start))\n bar.next()\n bar.finish()\n\n return selected", "def find_best_group(mdl_array, lpx, tcost_bar, contract_size, max_group_pos=20, exclude_gp=None, clip_maxpos=True, best_pos=None, best_shp=0):\n ndays, n = lpx.shape\n mcnt = len(mdl_array)\n best_gp = None\n if best_pos is None:\n best_pos = np.zeros((ndays,n))\n\n best_shp0 = best_shp\n for m, mdl_dict in enumerate(mdl_array):\n pick_array = mdl_dict['pick']\n for p, pick in enumerate(pick_array):\n if exclude_gp is not None and (m,p) in exclude_gp:\n continue\n (k0,k1) = pick[0]\n pos0 = best_pos.copy()\n pos0[:,k0:k1+1]+=mdl_dict['pos_scale'][:,k0:k1+1]\n pos00, pnl0, shp0 = pnl_from_pos_tcost(lpx, np.r_[pos0.flatten(),0], tcost_bar, max_group_pos, contract_size, clip_maxpos=clip_maxpos)\n\n # recover the original k0k1 position. i.e. 
from model_dict['pos_scale']\n pos0-=best_pos\n if shp0>best_shp0:\n print(' best shp update - ', m, p, '(%d,%d) shp:%f pnl:%f'%(k0,k1,shp0,np.sum(pnl0)))\n best_shp0 = shp0\n best_gp0 = (m,p)\n best_pos0 = pos0.copy()\n best_pnl = pnl0.copy()\n dshp = best_shp0-best_shp\n if dshp == 0:\n print('Nothing beats the given, fold')\n return None, None, None, None\n\n print('PICK: ', best_gp0, best_shp0, np.sum(best_pnl))\n return best_gp0, best_pos0, best_pnl, best_shp0", "def optimal_split(self, ratio = 0.5):\n\n if (sum(self.game_state) == 1):\n if (randint(1,100)<=50):\n return self.game_state, [0]*(self.K+1)\n else:\n return [0]*(self.K+1), self.game_state\n \n else:\n prob = LpProblem(\"Optimal split\",LpMinimize)\n A = []\n for i in range(self.K + 1):\n A += LpVariable(str(i), 0, self.game_state[i], LpInteger)\n prob += sum([2**(-(self.K - i)) * c for c, i in zip(A, range(self.K + 1))]) - ratio * self.potential(self.game_state), \"Objective function\"\n prob += sum([2**(-(self.K - i)) * c for c, i in zip(A, range(self.K + 1))]) >= ratio * self.potential(self.game_state), \"Constraint\"\n #prob.writeLP(\"test.lp\")\n prob.solve()\n Abis = [0]*(self.K+1)\n for v in prob.variables():\n Abis[int(v.name)] = round(v.varValue)\n B = [z - a for z, a in zip(self.game_state, Abis)]\n return Abis, B", "def split(self, elIndicesDict, maxSubTreeSize=None, numSubTrees=None, verbosity=0):\n #dbList = self.generate_circuit_list()\n tm = _time.time()\n printer = _VerbosityPrinter.build_printer(verbosity)\n\n if (maxSubTreeSize is None and numSubTrees is None) or \\\n (maxSubTreeSize is not None and numSubTrees is not None):\n raise ValueError(\"Specify *either* maxSubTreeSize or numSubTrees\")\n if numSubTrees is not None and numSubTrees <= 0:\n raise ValueError(\"EvalTree split() error: numSubTrees must be > 0!\")\n\n #Don't split at all if it's unnecessary\n if maxSubTreeSize is None or len(self) < maxSubTreeSize:\n if numSubTrees is None or numSubTrees == 1: return elIndicesDict\n\n self.subTrees = []\n evalOrder = self.get_evaluation_order()\n printer.log(\"EvalTree.split done initial prep in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n\n def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n \"\"\"\n Find a set of subtrees by iterating through the tree\n and placing \"break\" points when the cost of evaluating the\n subtree exceeds some 'maxCost'. 
This ensure ~ equal cost\n trees, but doesn't ensure any particular number of them.\n\n maxCostRate can be set to implement a varying maxCost\n over the course of the iteration.\n \"\"\"\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost\n\n ##################################################################\n # Part I: find a list of where the current tree should be broken #\n ##################################################################\n\n subTreeSetList = []\n if numSubTrees is not None:\n\n subTreeSize = len(self) // numSubTrees\n for i in range(numSubTrees):\n end = (i + 1) * subTreeSize if (i < numSubTrees - 1) else len(self)\n subTreeSetList.append(set(range(i * subTreeSize, end)))\n\n else: # maxSubTreeSize is not None\n k = 0\n while k < len(self):\n end = min(k + maxSubTreeSize, len(self))\n subTreeSetList.append(set(range(k, end)))\n k = end\n\n ##########################################################\n # Part II: create subtrees from index sets\n ##########################################################\n # (common logic provided by base class up to providing a few helper fns)\n\n def permute_parent_element(perm, el):\n \"\"\"Applies a permutation to an element of the tree \"\"\"\n # perm[oldIndex] = newIndex\n return el # no need to permute operation sequence\n\n def create_subtree(parentIndices, numFinal, fullEvalOrder, sliceIntoParentsFinalArray, parentTree):\n \"\"\"\n Creates a subtree given requisite information:\n\n Parameters\n ----------\n parentIndices : list\n The ordered list of (parent-tree) indices to be included in\n the created subtree.\n\n numFinal : int\n The number of \"final\" elements, i.e. 
those that are used to\n construct the final array of results and not just an intermediate.\n The first numFinal elemements of parentIndices are \"final\", and\n 'sliceIntoParentsFinalArray' tells you which final indices of\n the parent they map to.\n\n fullEvalOrder : list\n A list of the integers between 0 and len(parentIndices)-1 which\n gives the evaluation order of the subtree *including* evaluation\n of any initial elements.\n\n sliceIntoParentsFinalArray : slice\n Described above - map between to-be-created subtree's final\n elements and parent-tree indices.\n\n parentTree : EvalTree\n The parent tree itself.\n \"\"\"\n subTree = TermEvalTree()\n subTree.myFinalToParentFinalMap = sliceIntoParentsFinalArray\n subTree.num_final_strs = numFinal\n subTree[:] = [None] * len(parentIndices)\n subTree.p_polys = {}\n subTree.dp_polys = {}\n subTree.hp_polys = {}\n subTree.repcache = {}\n\n for ik in fullEvalOrder: # includes any initial indices\n k = parentIndices[ik] # original tree index\n circuit = self[k] # original tree data\n subTree.eval_order.append(ik)\n assert(subTree[ik] is None)\n subTree[ik] = circuit\n\n subTree.parentIndexMap = parentIndices # parent index of each subtree index\n subTree.simplified_circuit_spamTuples = [self.simplified_circuit_spamTuples[kk]\n for kk in _slct.indices(subTree.myFinalToParentFinalMap)]\n #subTree._compute_finalStringToEls() #depends on simplified_circuit_spamTuples\n\n final_el_startstops = []; i = 0\n for spamTuples in parentTree.simplified_circuit_spamTuples:\n final_el_startstops.append((i, i + len(spamTuples)))\n i += len(spamTuples)\n subTree.myFinalElsToParentFinalElsMap = _np.concatenate(\n [_np.arange(*final_el_startstops[kk])\n for kk in _slct.indices(subTree.myFinalToParentFinalMap)])\n #Note: myFinalToParentFinalMap maps only between *final* elements\n # (which are what is held in simplified_circuit_spamTuples)\n\n subTree.num_final_els = sum([len(v) for v in subTree.simplified_circuit_spamTuples])\n subTree.recompute_spamtuple_indices(bLocal=False)\n\n subTree.opLabels = self._get_opLabels(subTree.generate_circuit_list(permute=False))\n\n return subTree\n\n updated_elIndices = self._finish_split(elIndicesDict, subTreeSetList,\n permute_parent_element, create_subtree)\n printer.log(\"EvalTree.split done second pass in %.0fs\" %\n (_time.time() - tm)); tm = _time.time()\n return updated_elIndices", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. 
\n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def _forward_best_subset(X, y, nbest=8, beamwidth=40, score=\"bic\"):\n \n assert nbest > 0, \"nbest must be positive\"\n beamwidth = max(beamwidth, nbest)\n \n # Add constant\n Xc = add_constant(X).rename(columns={'const': '(Intercept)'})\n \n def get_bic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().bic\n\n def get_aic(feature_subset):\n return -OLS(y, Xc[feature_subset]).fit().aic\n\n get_score = get_bic if score == \"bic\" else get_aic\n \n features = X.columns\n \n heap = []\n visited = set()\n \n def get_pair(k):\n return get_score(['(Intercept)', *k]), k\n \n k = ()\n heapq.heappush(heap, get_pair(k))\n \n while True:\n modified = False\n min_score = heap[0][0]\n for _, k in heap:\n for f in features:\n if f in k:\n continue\n candidate_features = tuple(sorted([*k, f]))\n if candidate_features in visited:\n continue\n visited.add(candidate_features)\n new_pair = get_pair(candidate_features)\n if new_pair[0] > min_score:\n modified = True\n heapq.heappush(heap, get_pair(candidate_features))\n if len(heap) > beamwidth:\n heapq.heappop(heap)\n min_score = heap[0][0]\n if not modified:\n break\n \n return heapq.nsmallest(nbest, [(-x, ['(Intercept)', *y]) for x, y in heap])", "def generate_move_minimax_ab(\r\n board: np.ndarray, player: BoardPiece, saved_state: Optional[SavedState]\r\n) -> Tuple[PlayerAction, Optional[SavedState]]:\r\n depth = 4\r\n # choose which heuristic to use\r\n heuristic = heuristic_basic\r\n # heuristic = heuristic_better\r\n alpha, beta = int(-1e10), int(1e10) # starting alpha-beta values\r\n action_set = minimax_ab(board, alpha, beta, player, depth, True, heuristic=heuristic)\r\n print(action_set)\r\n # if np.all(board) == 0: # if the board is empty\r\n if len(set(action_set)) == 1: # if all options are the same\r\n action = 3 # put piece in the center\r\n else:\r\n action = np.argmax(action_set) # maximize the best move\r\n return PlayerAction(action), saved_state", "def select(self, batch_size):\n\n #if self.tree.filled_size() < batch_size:\n # print('CALLING REPLAY SAMPLING WHEN NOT FULL ENOUGH')\n # #return None, None\n\n out = []\n indices = []\n #weights = []\n priorities = []\n avoid_resampling = False\n for _ in range(batch_size):\n r = random.random()\n #return (idx, self.tree[idx], self.data[dataIdx])\n data, priority, index = self.tree.find(r)\n #index, priority, data = 
self.tree.find(r)\n #print(index)\n #print(\"d: {}, \\n priority: {}, \\n index: {}\".format(data, priority, index))\n priorities.append(priority)\n #weights.append((1. / self.memory_size / priority) ** beta if priority > 1e-16 else 0)\n indices.append(index)\n out.append(data)\n if avoid_resampling: self.priority_update([index], [self.epsilon_priority]) # To avoid resampling same transition too much\n\n for i in range(len(priorities)):\n if priorities[i] >= self.bonus_priority: # remove priority bonus\n priorities[i] -= self.bonus_priority\n self.priority_update([indices[i]],[priorities[i]])\n\n # avoid resampling part self.priority_update(indices, priorities) # Revert priorities\n #weights /= max(weights) # Normalize for stability\n return out, indices", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def best_split(self):\r\n best_splits = [[0, None, None]]\r\n impurity, best_S, best_xj = 0, None, None\r\n \r\n for xj in self.x_names:\r\n for S in self.potential_splits(xj):\r\n ir = float(self.impurity_reduction(xj, S))\r\n if ir > impurity:\r\n impurity, best_S, best_xj = ir, S, xj\r\n best_splits.append([S, xj])\r\n else: \r\n pass\r\n \r\n return best_S, best_xj", "def sparse_options(default_solver='spsolve',\n default_least_squares_solver='least_squares_lsmr' if HAVE_SCIPY_LSMR else 'least_squares_generic_lsmr',\n bicgstab_tol=1e-15,\n bicgstab_maxiter=None,\n spilu_drop_tol=1e-4,\n spilu_fill_factor=10,\n spilu_drop_rule='basic,area',\n spilu_permc_spec='COLAMD',\n spsolve_permc_spec='COLAMD',\n spsolve_keep_factorization=True,\n lgmres_tol=1e-5,\n lgmres_maxiter=1000,\n lgmres_inner_m=39,\n lgmres_outer_k=3,\n least_squares_lsmr_damp=0.0,\n least_squares_lsmr_atol=1e-6,\n least_squares_lsmr_btol=1e-6,\n least_squares_lsmr_conlim=1e8,\n least_squares_lsmr_maxiter=None,\n least_squares_lsmr_show=False,\n least_squares_lsqr_damp=0.0,\n least_squares_lsqr_atol=1e-6,\n least_squares_lsqr_btol=1e-6,\n least_squares_lsqr_conlim=1e8,\n least_squares_lsqr_iter_lim=None,\n least_squares_lsqr_show=False,\n pyamg_tol=1e-5,\n pyamg_maxiter=400,\n pyamg_verb=False,\n pyamg_rs_strength=('classical', {'theta': 0.25}),\n pyamg_rs_CF='RS',\n pyamg_rs_presmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_postsmoother=('gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_rs_max_levels=10,\n pyamg_rs_max_coarse=500,\n pyamg_rs_coarse_solver='pinv2',\n pyamg_rs_cycle='V',\n pyamg_rs_accel=None,\n pyamg_rs_tol=1e-5,\n pyamg_rs_maxiter=100,\n pyamg_sa_symmetry='hermitian',\n pyamg_sa_strength='symmetric',\n pyamg_sa_aggregate='standard',\n pyamg_sa_smooth=('jacobi', {'omega': 4.0/3.0}),\n pyamg_sa_presmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_postsmoother=('block_gauss_seidel', {'sweep': 'symmetric'}),\n pyamg_sa_improve_candidates=[('block_gauss_seidel', {'sweep': 'symmetric', 'iterations': 4}), None],\n pyamg_sa_max_levels=10,\n pyamg_sa_max_coarse=500,\n pyamg_sa_diagonal_dominance=False,\n pyamg_sa_coarse_solver='pinv2',\n pyamg_sa_cycle='V',\n pyamg_sa_accel=None,\n pyamg_sa_tol=1e-5,\n pyamg_sa_maxiter=100):\n\n assert default_least_squares_solver.startswith('least_squares')\n\n opts = (('bicgstab_spilu', {'type': 'bicgstab_spilu',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter,\n 'spilu_drop_tol': spilu_drop_tol,\n 'spilu_fill_factor': spilu_fill_factor,\n 'spilu_drop_rule': spilu_drop_rule,\n 'spilu_permc_spec': spilu_permc_spec}),\n ('bicgstab', {'type': 'bicgstab',\n 'tol': bicgstab_tol,\n 'maxiter': bicgstab_maxiter}),\n ('spsolve', 
{'type': 'spsolve',\n 'permc_spec': spsolve_permc_spec,\n 'keep_factorization': spsolve_keep_factorization}),\n ('lgmres', {'type': 'lgmres',\n 'tol': lgmres_tol,\n 'maxiter': lgmres_maxiter,\n 'inner_m': lgmres_inner_m,\n 'outer_k': lgmres_outer_k}),\n ('least_squares_lsqr', {'type': 'least_squares_lsqr',\n 'damp': least_squares_lsqr_damp,\n 'atol': least_squares_lsqr_atol,\n 'btol': least_squares_lsqr_btol,\n 'conlim': least_squares_lsqr_conlim,\n 'iter_lim': least_squares_lsqr_iter_lim,\n 'show': least_squares_lsqr_show}))\n\n if HAVE_SCIPY_LSMR:\n opts += (('least_squares_lsmr', {'type': 'least_squares_lsmr',\n 'damp': least_squares_lsmr_damp,\n 'atol': least_squares_lsmr_atol,\n 'btol': least_squares_lsmr_btol,\n 'conlim': least_squares_lsmr_conlim,\n 'maxiter': least_squares_lsmr_maxiter,\n 'show': least_squares_lsmr_show}),)\n\n if HAVE_PYAMG:\n opts += (('pyamg', {'type': 'pyamg',\n 'tol': pyamg_tol,\n 'maxiter': pyamg_maxiter}),\n ('pyamg-rs', {'type': 'pyamg-rs',\n 'strength': pyamg_rs_strength,\n 'CF': pyamg_rs_CF,\n 'presmoother': pyamg_rs_presmoother,\n 'postsmoother': pyamg_rs_postsmoother,\n 'max_levels': pyamg_rs_max_levels,\n 'max_coarse': pyamg_rs_max_coarse,\n 'coarse_solver': pyamg_rs_coarse_solver,\n 'cycle': pyamg_rs_cycle,\n 'accel': pyamg_rs_accel,\n 'tol': pyamg_rs_tol,\n 'maxiter': pyamg_rs_maxiter}),\n ('pyamg-sa', {'type': 'pyamg-sa',\n 'symmetry': pyamg_sa_symmetry,\n 'strength': pyamg_sa_strength,\n 'aggregate': pyamg_sa_aggregate,\n 'smooth': pyamg_sa_smooth,\n 'presmoother': pyamg_sa_presmoother,\n 'postsmoother': pyamg_sa_postsmoother,\n 'improve_candidates': pyamg_sa_improve_candidates,\n 'max_levels': pyamg_sa_max_levels,\n 'max_coarse': pyamg_sa_max_coarse,\n 'diagonal_dominance': pyamg_sa_diagonal_dominance,\n 'coarse_solver': pyamg_sa_coarse_solver,\n 'cycle': pyamg_sa_cycle,\n 'accel': pyamg_sa_accel,\n 'tol': pyamg_sa_tol,\n 'maxiter': pyamg_sa_maxiter}))\n opts = OrderedDict(opts)\n opts.update(genericsolvers.options())\n def_opt = opts.pop(default_solver)\n if default_least_squares_solver != default_solver:\n def_ls_opt = opts.pop(default_least_squares_solver)\n ordered_opts = OrderedDict(((default_solver, def_opt),\n (default_least_squares_solver, def_ls_opt)))\n else:\n ordered_opts = OrderedDict(((default_solver, def_opt),))\n ordered_opts.update(opts)\n return ordered_opts" ]
[ "0.59374243", "0.5722452", "0.51469857", "0.51208717", "0.51167727", "0.5081394", "0.50553244", "0.50473976", "0.5028678", "0.5010286", "0.49406895", "0.49098796", "0.48995638", "0.4865513", "0.48511472", "0.48335147", "0.48096806", "0.47716156", "0.4743922", "0.47341198", "0.4722869", "0.47086594", "0.47077912", "0.4700789", "0.4698528", "0.4685798", "0.4673617", "0.4669398", "0.46493873", "0.4645071", "0.46255672", "0.46174526", "0.46142235", "0.46131715", "0.46114528", "0.46033642", "0.4585919", "0.45763364", "0.45712084", "0.45701244", "0.4563361", "0.45628992", "0.45590693", "0.4554576", "0.45524833", "0.4548369", "0.45424306", "0.4541135", "0.45402655", "0.45307186", "0.45241815", "0.45224518", "0.4519764", "0.4499005", "0.44896454", "0.44873953", "0.4485861", "0.4485578", "0.44846717", "0.44838238", "0.44795236", "0.44722974", "0.44702578", "0.44677308", "0.4466305", "0.44640833", "0.4463898", "0.44589764", "0.44509584", "0.444103", "0.44391286", "0.44369102", "0.44332528", "0.4430601", "0.44252738", "0.44186303", "0.44179106", "0.4417133", "0.4414748", "0.44138476", "0.44055632", "0.44055012", "0.44038403", "0.44023478", "0.44005024", "0.43987837", "0.4396916", "0.4394488", "0.43933362", "0.4393146", "0.43925005", "0.43889436", "0.438676", "0.43846273", "0.43840456", "0.43840235", "0.4382403", "0.4380621", "0.43785006", "0.4376741" ]
0.7721114
0
Equation 3 from the Alpa paper. The primary difference from the paper is the s - 1 > max_n_succ_stages check (when it triggers, placing that stage would lead to OOM, and thus we continue).
def inter_op_dp_inner_loop(
    n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages
):
    # F[s, l, d]: minimum summed stage cost for slicing layers l..n_layers-1 into s
    # pipeline stages using exactly d devices. F_stage_max tracks the cost of the
    # slowest stage along that optimal split; F_argmin stores (next start layer,
    # submesh shape idx, sharding config idx) so the assignment can be backtracked.
    F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)
    F_stage_max = np.full(
        (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32
    )
    F_argmin = np.full(
        (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32
    )
    F[0, n_layers, 0] = 0  # base case: no layers left, zero stages, zero devices

    for d in range(1, n_devices + 1):
        for (
            l,
            i,
            submesh_shape_idx,
            sharding_config_idx,
            stage_cost,
        ) in valid_idxs_costs:
            l, i, submesh_shape_idx, sharding_config_idx = map(
                int, (l, i, submesh_shape_idx, sharding_config_idx)
            )

            n_submesh_devices = submesh_sizes[submesh_shape_idx]
            if n_submesh_devices <= d:
                for s in range(1, n_layers + 1):
                    # The stage being placed would have s - 1 succeeding stages; if
                    # that exceeds the memory-feasible maximum for this (layers,
                    # submesh, config) choice, placing it would OOM, so skip it.
                    if (
                        s - 1
                        > max_n_succ_stages[
                            l, i, submesh_shape_idx, sharding_config_idx
                        ]
                    ):
                        continue

                    # Relax: stage (l..i) on this submesh/config, remainder solved by
                    # the (s - 1)-stage subproblem starting at layer i + 1.
                    new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost
                    if new_cost < F[s, l, d]:
                        F[s, l, d] = new_cost
                        F_argmin[s, l, d] = (
                            i + 1,
                            submesh_shape_idx,
                            sharding_config_idx,
                        )
                        F_stage_max[s, l, d] = max(
                            F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost
                        )

    return F, F_stage_max, F_argmin
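A note on the snippet above (my own reading of the loop, not text quoted from the Alpa paper): F[s, l, d] is the minimum summed stage cost for slicing layers l..n_layers-1 into s pipeline stages using exactly d devices. Written as a recurrence in LaTeX, with |mesh_j| the device count of submesh shape j and c ranging over sharding configs,

\[
F[s, l, d] \;=\; \min_{\substack{i \ge l,\ (j, c):\ |\mathrm{mesh}_j| \le d \\ s - 1 \,\le\, \mathrm{max\_n\_succ\_stages}[l, i, j, c]}}
\Big( F[s-1,\, i+1,\, d - |\mathrm{mesh}_j|] \;+\; \mathrm{stage\_cost}(l..i,\, j,\, c) \Big),
\qquad F[0,\, n\_layers,\, 0] = 0,
\]

with F_stage_max[s, l, d] carrying the cost of the slowest stage along the chosen split and F_argmin[s, l, d] recording (i + 1, j, c) for backtracking.

A minimal sketch of how the returned tables might be consumed by a driver; the names n_microbatches and best_n_stages, and the (B - 1) * max pipeline-latency term, are my assumptions and are not part of the snippet:

# assumed driver code, not from the original snippet
total_costs = [
    F[s, 0, n_devices] + (n_microbatches - 1) * F_stage_max[s, 0, n_devices]
    for s in range(1, n_layers + 1)
]
best_n_stages = int(np.argmin(total_costs)) + 1
# then walk F_argmin from (best_n_stages, 0, n_devices) to recover the per-stage
# (layer range, submesh shape, sharding config) assignments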
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def N_stages(self):\n return 5", "def num_seq_dep_stages(self):\n n_s = [0]*len(self)\n for i in range(len(self)):\n for j in range(i):\n if self.A[i,j] != 0:\n n_s[i] = max(n_s[i], n_s[j]+1)\n\n n = 0\n for i in range(len(self)):\n if self.b[i] != 0:\n n = max(n, n_s[i]+1)\n return n", "def _hs_reducible_stages(self,tol=1.e-13):\n m=len(self)\n mindiff=10.\n for i in range(m):\n for j in range(i+1,m):\n dif = np.max(np.abs(self.A[i,:]-self.A[j,:]))\n if dif<tol: return True,[i,j]\n mindiff=min(mindiff,dif)\n return False, mindiff", "def _dj_reducible_stages(self,tol=1.e-13):\n from copy import copy\n b=self.b; A=self.A\n Nset = [j for j in range(len(b)) if abs(b[j])<tol]\n while len(Nset)>0: #Try successively smaller sets N\n Nsetold=copy(Nset)\n for j in Nset: #Test whether stage j matters\n remove_j=False\n for i in range(len(self)):\n if i not in Nset and abs(A[i,j])>tol: #Stage j matters\n remove_j=True\n continue\n if remove_j: continue\n if remove_j:\n Nset.remove(j)\n continue\n if Nset==Nsetold: return Nset\n if hasattr(self,'embedded_method'):\n Nset2 = self.embedded_method._dj_reducible_stages(tol)\n Nset = [x for x in Nset if x in Nset2]\n return Nset", "def check_stage(self):\n\n #Initalize target and goal_stage to stage1 values\n target = 3\n goal_stage = 2\n\n # Set target and goal_stage if current stage is not 1\n if self.current_stage == 2:\n target = 7\n goal_stage = 3\n elif self.current_stage == 3:\n target = 11\n goal_stage = 4\n\n # Check the stage goals\n if self.die_a.value + self.die_b.value == target and not self.cheating:\n self.current_stage = goal_stage", "def part_1(rules: Rules) -> int:\n\n happiness, _ = max(generate_arrangements(rules))\n print(f\"part 1: optimal arrangement brings {happiness} happiness\")\n return happiness", "def test_is_advancing_to_next_stage_no(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 2, False),\n (\"2\", \"1\", 3, False),\n (\"1\", \"1\", 1, False),\n (\"1\", \"1\", 2, False),\n (\"1\", \"1\", 3, False),\n (\"ANGRY\", \"1\", 1, False),\n (\"ANGRY\", \"1\", 2, False),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def n_delay_stages(self):\n return self._n_delay_stages", "def n_delay_stages(self):\n return self._n_delay_stages", "def num_stages(self) -> int:\n return self.pg_mesh.size(self.pipeline_axis)", "def run(stages, maxsize=0):\n\n if isinstance(stages, list) and len(stages) == 0:\n raise ValueError(\"Expected at least 1 stage to run\")\n\n elif isinstance(stages, list):\n stage = concat(stages, maxsize=maxsize)\n\n else:\n stage = stages\n\n stage = to_iterable(stage, maxsize=maxsize)\n\n for _ in stages:\n pass", "def solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic):\n\n result = {}\n result[\"visualStages\"] = []\n for stage in stages:\n\n stage_dic = {}\n object_dic_copy = copy.deepcopy(objects_dic)\n predicates = stage[\"items\"]\n sorted_predicates = priority(predicates, predicates_rules)\n\n # For hanoi problem, reset each stage\n # For logistics problem, reset each stage\n for fname in gstate[\"reset_function\"]:\n gstate[fname] = {}\n solvepredicates(sorted_predicates, object_dic_copy, predicates_rules, gstate)\n stage_dic[\"visualSprites\"] = object_dic_copy\n if \"stageName\" not in stage:\n stage_dic[\"stageName\"] = \"Inital Stage\"\n stage_dic[\"stageInfo\"] = \"No step information\"\n\n else:\n stage_dic[\"stageName\"] = stage[\"stageName\"]\n 
stage_dic[\"stageInfo\"] = stage[\"stageInfo\"]\n\n result[\"visualStages\"].append(stage_dic)\n\n result[\"subgoals\"] = Subgoal.get_subgoal(stages, problem_dic[1]['goal'].copy(), actionlist.copy())\n\n return result", "def alpha_beta(self, cur_state, limit, cur_level, alpha, beta, min_level):\n\n # Evaluate current state.\n if cur_level == limit or get_action_score(cur_state.action[0], cur_state.action[1], cur_state.action_player, cur_state.occupied)==100:\n return cur_state.value, cur_state, cur_level, None\n else:\n child_list = cur_state.successors()\n final_state = None\n action_took = None\n if cur_state.player == 1: # MAX player\n for i in range(len(child_list)):\n c = heapq.heappop(child_list)\n (c_alpha, c_state, c_level, action) = self.alpha_beta(c[1], limit, cur_level + 1, alpha, beta, min_level)\n # print(\"HERE: \"+str(c_alpha)+\" \"+str(c_level))\n if (c_alpha > alpha) or (c_alpha == alpha and c_level < min_level):\n alpha = c_alpha\n final_state = c_state\n action_took = c[1].action\n min_level = c_level\n if beta <= alpha:\n break\n return alpha, final_state, min_level, action_took\n else: # MIN player\n for i in range(len(child_list)):\n c = heapq.heappop(child_list)\n (c_beta, c_state, c_level, action) = self.alpha_beta(c[1], limit, cur_level + 1, alpha, beta, min_level)\n # print(\"c_beta = \" + str(c_beta) + \", beta = \" + str(beta))\n if (c_beta < beta) or (c_beta == beta and c_level < min_level):\n beta = c_beta\n final_state = c_state\n action_took = c[1].action\n min_level = c_level\n if beta <= alpha:\n break\n return beta, final_state, min_level, action_took", "def __stage2(self, img, total_boxes, stage_status: StageStatus):\r\n\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, stage_status\r\n\r\n # second stage\r\n tempimg = np.zeros(shape=(24, 24, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))\r\n\r\n tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \\\r\n img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)\r\n\r\n else:\r\n return np.empty(shape=(0,)), stage_status\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._rnet.run(tempimg1)\r\n\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n\r\n score = out1[1, :]\r\n\r\n ipass = np.where(score > self._steps_threshold[1])\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n if total_boxes.shape[0] > 0:\r\n pick = self.__nms(total_boxes, 0.7, 'Union')\r\n total_boxes = total_boxes[pick, :]\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n return total_boxes, stage_status", "def alphaBeta(self,state,alpha,beta,depth=0):\n \n #print(\"NextState (depth \"+str(depth)+\"):\")\n #print(\"Action: \"+state.get_action())\n if state in self.__explored:\n return self.__explored[state.get_hashable_state()]\n \n if state.is_end_state() or depth >= (self.__max_depth-1):\n #Return terminal state's utility value\n self.__explored[state.get_hashable_state()] = state.get_utility_value()\n return 
state.get_utility_value()\n \n is_max_turn = state.get_max_turn()\n childList = state.get_successors()\n \n if is_max_turn:\n for c in childList:\n #if c in self.__explored.keys():\n # continue\n alpha = max(alpha, self.alphaBeta(c,alpha,beta,depth+1)) \n if beta <= alpha:\n break \n self.__explored[state.get_hashable_state()] = alpha\n return alpha\n else:\n for c in childList:\n #if c in self.__explored.keys():\n # continue\n beta = min(beta, self.alphaBeta(c,alpha,beta,depth+1)) \n if beta <= alpha:\n break \n self.__explored[state.get_hashable_state()] = beta\n return beta", "def max_evidence(self):\n self.A = np.linalg.inv(self.Sn)\n A_eigval = np.linalg.eigvals(self.A)\n gamma = 0\n for i in range(len(A_eigval)):\n gamma += A_eigval[i]/(self.alpha + A_eigval[i])\n new_alpha = gamma/(self.mn.T@self.mn)\n\n sum = 0\n for i in range(self.n):\n sum +=(self.t[i]-self.mn.T@self.design_matrix[i])**2\n new_beta = 1/((1/(self.n-gamma))*sum)\n\n return new_alpha, new_beta", "def run(stages: typing.List[Stage], maxsize: int = 0) -> None:\n\n if isinstance(stages, list) and len(stages) == 0:\n raise ValueError(\"Expected at least 1 stage to run\")\n\n elif isinstance(stages, list):\n stage = concat(stages, maxsize=maxsize)\n\n else:\n stage = stages\n\n stage = to_iterable(stage, maxsize=maxsize)\n\n for _ in stages:\n pass", "def Problem4(n):\n A = Problem2(n)\n eig = min(sl.eigs(A.asfptype(), which='SM')[0])\n \n print \"lamba*n^2 approaches pi^2 as n goes to infinity\"\n return eig*n**2", "def __stage3(self, img, total_boxes, stage_status: StageStatus):\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, np.empty(shape=(0,))\r\n\r\n total_boxes = np.fix(total_boxes).astype(np.int32)\r\n\r\n status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),\r\n width=stage_status.width, height=stage_status.height)\r\n\r\n tempimg = np.zeros((48, 48, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n\r\n tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))\r\n\r\n tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \\\r\n img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)\r\n else:\r\n return np.empty(shape=(0,)), np.empty(shape=(0,))\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._onet.run(tempimg1)\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n out2 = np.transpose(out[2])\r\n\r\n score = out2[1, :]\r\n\r\n points = out1\r\n\r\n ipass = np.where(score > self._steps_threshold[2])\r\n\r\n points = points[:, ipass[0]]\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n w = total_boxes[:, 2] - total_boxes[:, 0] + 1\r\n h = total_boxes[:, 3] - total_boxes[:, 1] + 1\r\n\r\n points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1\r\n points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1\r\n\r\n if total_boxes.shape[0] > 0:\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))\r\n pick = self.__nms(total_boxes.copy(), 0.7, 'Min')\r\n total_boxes = total_boxes[pick, :]\r\n points = points[:, pick]\r\n\r\n return total_boxes, points", "def 
num_stages(tree, order):\n p = len(order)\n stages = cstree_to_stages(tree, order)\n return sum([len(stages[i]) for i in range(1,p)])", "def powAlpha( n ):\n return (1-betaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * betaval", "def _remove_stage(self,stage):\n s = len(self)\n A=np.delete(np.delete(self.A,stage,1),stage,0)\n b=np.delete(self.b,stage)\n c=np.delete(self.c,stage)\n self.A=A\n self.b=b\n self.c=c\n if hasattr(self,'bhat'):\n bhat=np.delete(self.bhat,stage)\n self.bhat=bhat\n if self.alpha is not None:\n for i in range(s+1):\n if self.alpha[i,stage] != 0: # Doing this check speeds things up\n self.alpha,self.beta = shu_osher_zero_alpha_ij(self.alpha,self.beta,i,stage)\n alpha=np.delete(np.delete(self.alpha,stage,1),stage,0)\n self.alpha = alpha\n beta=np.delete(np.delete(self.beta,stage,1),stage,0)\n self.beta = beta\n if hasattr(self,'alphahat'):\n for i in range(s+1):\n if self.alphahat[i,stage] != 0: # Doing this check speeds things up\n self.alphahat,self.betahat = shu_osher_zero_alpha_ij(self.alphahat,self.betahat,i,stage)\n alphahat=np.delete(np.delete(self.alphahat,stage,1),stage,0)\n self.alphahat = alphahat\n betahat=np.delete(np.delete(self.betahat,stage,1),stage,0)\n self.betahat = betahat", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def minimax(gamestate, depth, timeTotal, alpha, beta, maxEntity):\n\n bonus = 0\n isTerminalState = gamestate.board.checkTerminalState(gamestate.currentPlayer.noPlayer)\n # Basis Rekursif\n if ((depth == 0) or (time.time() > timeTotal) or (isTerminalState)):\n if (isTerminalState) and 
(gamestate.currentPlayer.noPlayer == maxEntity):\n bonus = 10\n elif (isTerminalState) and (gamestate.currentPlayer.noPlayer != maxEntity):\n bonus = -10\n return gamestate, U_Function(gamestate.currentPlayer, gamestate.oppositePlayer, gamestate.board.size, maxEntity) + bonus\n\n # Rekurens\n if (gamestate.currentPlayer.noPlayer == maxEntity):\n # Choose the maximum utility of the state\n # Iterate all pion and its possible moves\n maxGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n maxValue = -math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Iterate all possible moves of pion index\n for move in all_possible_moves:\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimax(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old max value\n if (utility > maxValue):\n maxValue = utility\n maxGameState = newGameState\n \n alpha = max(alpha, maxValue)\n if (beta <= alpha):\n return maxGameState, maxValue\n return maxGameState, maxValue\n\n else:\n # Choose the minimum utility of the state\n minGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n minValue = math.inf\n\n # Iterate all pion index\n for idx in range(len(gamestate.currentPlayer.arrayPion)):\n all_possible_moves = gamestate.currentPlayer.listAllPossibleMove(idx, gamestate.board)\n\n # Iterate all possible moves of pion index\n for move in all_possible_moves:\n newGameState = GameState.GameState(gamestate.board, gamestate.currentPlayer, gamestate.oppositePlayer)\n newGameState.currentPlayer.movePion(idx, move, newGameState.board)\n\n recursiveState = GameState.GameState(newGameState.board, newGameState.currentPlayer, newGameState.oppositePlayer)\n recursiveState.nextTurn()\n dummyState, utility = minimax(recursiveState, depth-1, timeTotal, alpha, beta, maxEntity)\n\n # Compare with the old min value\n if (utility < minValue):\n minValue = utility\n minGameState = newGameState\n \n beta = min(beta, minValue)\n if (beta <= alpha):\n return minGameState, minValue\n \n return minGameState, minValue", "def big_analysis(beta0s=[0.5, 0.8, 1.1, 1.4, 1.7], ks=range(6), betaps=[1.2, 1.5, 2, 3]):", "def __stage1(self, image, scales: list, stage_status: StageStatus):\r\n total_boxes = np.empty((0, 9))\r\n status = stage_status\r\n\r\n for scale in scales:\r\n scaled_image = self.__scale_image(image, scale)\r\n\r\n img_x = np.expand_dims(scaled_image, 0)\r\n img_y = np.transpose(img_x, (0, 2, 1, 3))\r\n\r\n out = self._pnet.run(img_y)\r\n\r\n out0 = np.transpose(out[0], (0, 2, 1, 3))\r\n out1 = np.transpose(out[1], (0, 2, 1, 3))\r\n\r\n boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(),\r\n out0[0, :, :, :].copy(), scale, self._steps_threshold[0])\r\n\r\n # inter-scale nms\r\n pick = self.__nms(boxes.copy(), 0.5, 'Union')\r\n if boxes.size > 0 and pick.size > 0:\r\n boxes = boxes[pick, :]\r\n total_boxes = np.append(total_boxes, boxes, axis=0)\r\n\r\n numboxes = total_boxes.shape[0]\r\n\r\n if numboxes > 0:\r\n pick = self.__nms(total_boxes.copy(), 0.7, 'Union')\r\n 
total_boxes = total_boxes[pick, :]\r\n\r\n regw = total_boxes[:, 2] - total_boxes[:, 0]\r\n regh = total_boxes[:, 3] - total_boxes[:, 1]\r\n\r\n qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw\r\n qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh\r\n qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw\r\n qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh\r\n\r\n total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)\r\n status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),\r\n width=stage_status.width, height=stage_status.height)\r\n\r\n return total_boxes, status", "def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def objective(trial):\n %time\n env = gym.make('Delivery-v0')\n alpha = trial.suggest_discrete_uniform('alpha', 0.3,0.9,0.3)\n gamma = trial.suggest_discrete_uniform('gamma', 0.6, 1,0.1)\n epsilon = trial.suggest_discrete_uniform('epsilon', 0.01, 0.11, 0.04)\n episodes = 1000000\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n last_reward = np.mean(rewards)\n # trial.report(-1 * last_reward)\n\n return -1 * last_reward", "def dfs_maximizing(state) :\n #print state.describe_previous_move()\n global state_evals, path, _path, _score, level, _state;\n\n level+=1\n path.append(state)\n for stt in state.generate_next_states():\n score=0\n agenda.append((stt, level))\n \n if stt.is_game_over():\n state_evals+=1\n score=stt.get_endgame_score()\n if score>_score:\n _score=score\n _path = path[0:]\n _state = stt\n if not agenda:\n\n _path.append(_state)\n return [_path, _score, state_evals];\n else:\n new_state, level=agenda.pop()\n path=path[0:level]\n level-=1\n return dfs_maximizing(new_state)", "def part3e_1():\n # Run on a few examples and see that the constraints are being met.\n xs = \"Werner & Co entered court today . 
Werner maintained that they were not guilty .\".split()\n ys = \"-ORG- -ORG- -ORG- -O- -O- -O- -O- -ORG- -O- -O- -O- -O- -O- -O- -O-\".split()\n assert len(xs) == len(ys)\n\n N = 50000\n ys_ = submission.computeGibbsBestSequence(\n englishCRF,\n submission.getLongRangeCRFBlocks,\n submission.chooseGibbsLongRangeCRF,\n xs, \n N)\n grader.requireIsEqual( ys, ys_ )", "def powBeta( n ):\n return (1-alphaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * alphaval", "def get_stages(plan, problem_dic, problem_file, predicates_list):\r\n\r\n # Initial stage\r\n stages = problem_dic[0]['init'].copy()\r\n\r\n with open(problem_file) as file:\r\n text = file.read()\r\n objects = re.findall(r'\\b\\S+\\b', text[text.index(\"objects\")\r\n + len(\"objects\"):text.index(\"init\")])\r\n\r\n # Getting the list of actions from results returned from planning.domain api\r\n try:\r\n actionlist = plan['result']['plan']\r\n except KeyError:\r\n sys.exit(\"No plan have been returned\")\r\n cleanactionlist = remove_unused_char(actionlist)\r\n\r\n content = {\"stages\": [], \"objects\": objects}\r\n content['stages'].append({\"items\": stages.copy()})\r\n # 1. Go through the steps\r\n for counter in range(0, len(actionlist)):\r\n checklist = []\r\n init_object_list = server.PddLparser.visualiserFile.\\\r\n parser.problem_parser.\\\r\n get_object_list(predicates_list, cleanactionlist[counter])\r\n checklist = (init_object_list)\r\n\r\n # 2. Find the difference between 2 steps\r\n addactionlistarr = []\r\n removeactionlistarr = []\r\n for var in checklist:\r\n if var in stages:\r\n removeactionlistarr.append(var)\r\n else:\r\n addactionlistarr.append(var)\r\n\r\n # Append the list to get the final result\r\n for addvar in addactionlistarr:\r\n stages.append(addvar)\r\n for rmvar in removeactionlistarr:\r\n stages.remove(rmvar)\r\n\r\n # 3.\r\n # Append everything to get the final output - content\r\n result = {\"items\": stages.copy(),\r\n \"add\": addactionlistarr,\r\n \"remove\": removeactionlistarr}\r\n content['stages'].append(result)\r\n return content", "def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False", "def isSolvable(state):\n\n invCount = 0\n size = len(state)\n for i in range(0, size-1):\n for j in range(i+1, size):\n if (int(state[j]) and int(state[i]) and state[i] > state[j]):\n invCount += 1\n # return (invCount%2 == 0)\n return 1", "def alphabeta(self, state, alpha, beta, maxPlayer):\n # Are we in terminal state ?\n if state.isWin():\n # Utility function\n return state.getScore()\n if state.isLose():\n return -inf\n\n if maxPlayer: # PACMAN\n visited = self.visited\n utility = -inf\n for successor in state.generatePacmanSuccessors():\n nextPosition = successor[0].getPacmanPosition()\n nextScore = successor[0].getScore()\n # We only consider relevant nodes\n if (nextPosition not in visited or\n nextScore >= visited[nextPosition]):\n visited[nextPosition] = nextScore\n utility = max(utility, self.alphabeta(successor[0],\n alpha, beta, False))\n alpha = max(alpha, utility)\n if alpha >= beta:\n break\n return utility\n else: # GHOST\n utility = inf\n for successor in state.generateGhostSuccessors(1):\n utility = min(utility, self.alphabeta(successor[0], alpha,\n beta, True))\n beta = min(beta, 
utility)\n if alpha >= beta:\n break\n return utility", "def max_power_in_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_in[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def cost_state(s,state_considered,L,Q,gamma):\n if s==s.goal:\n return 0\n GLG=np.dot(state_considered.G.T,np.dot(L,state_considered.G))\n theta=state_considered.successor[2]\n u=state_considered.successor[1]\n theta_Q_theta=np.dot(theta.T,np.dot(Q,theta))\n v=vertices_cube(s.n)\n J={}\n for index in range(v.shape[0]):\n p=v[index,:].reshape(s.n,1)\n J[index]=0\n for row in range(s.n):\n for k in range(s.n):\n J[index]+=np.asscalar(p[row]*p[k]*GLG[row,k]+p[row]*p[k]*theta_Q_theta[row,k])\n return max(J.values())+np.asscalar(np.dot(state_considered.x.T,np.dot(L,state_considered.x))+np.dot(u.T,np.dot(Q,u))+gamma)", "def solution(self) -> State:", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def solveOneStep(self):\n ### Student code goes here\n if self.currentState.state == self.victoryCondition:\n self.visited[self.currentState]=True\n return True\n return self.BFS()", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def alphabeta_minimize_play(self, game, legal_moves, depth, alpha, beta):\n lowest_score, selected_move = (float('inf'), (-1, -1))\n for move in legal_moves:\n score, _ = self.alphabeta(game.forecast_move(move), depth - 1, alpha, beta, True)\n if score < beta:\n beta = score\n lowest_score, selected_move = score, move\n if beta <= alpha:\n break\n return (lowest_score, selected_move)", "def test_is_advancing_to_next_stage_yes(self):\n\n # test_input_cases =\n # [(die_a_value, die_b_value, stage, ok_output),]\n test_input_cases = [\n (\"1\", \"2\", 1, True),\n (\"2\", \"1\", 1, True),\n (\"ANGRY\", \"4\", 2, True),\n (\"4\", \"ANGRY\", 2, True),\n ]\n\n for test_io in test_input_cases:\n self._test_is_game_over(*test_io)", "def startAlphaBeta(self):\n start = time.time()\n \n alpha = float(\"-inf\")\n beta = float(\"inf\")\n \n is_max_turn = self.__state.get_max_turn()\n childList = self.__state.get_successors()\n \n choice = (None,float(\"-inf\")) if is_max_turn else (None,float(\"inf\"))\n \n if(len(childList) == 1):\n choice = (childList[0],childList[0].get_utility_value())\n else:\n for c in childList:\n val = self.alphaBeta(c,alpha,beta)\n if is_max_turn:\n if ai_config.Config.AVOID_TIE and c.check_path():\n val = val + (-1 - val)/2\n if val > choice[1]:\n choice = (c,val)\n alpha = val\n else:\n if ai_config.Config.AVOID_TIE and c.check_path():\n val = val + (1 - val)/2\n if val < choice[1]:\n choice = (c,val)\n beta = val \n \n self.__num_explored = len(self.__explored.keys())\n self.__explored.clear()\n \n end = time.time()\n \n self.__time_elapsed = end-start\n \n print(\"Utility: \"+\"{0:.3f}\".format(choice[1]))\n print(\"Nodes Explored: \"+str(self.__num_explored))\n print(\"Time Elapsed: \"+\"{0:.3f} seconds\".format(self.__time_elapsed))\n \n return choice[0]", "def compute_lb_totalflow(self):\r\n lb_totalflow = np.amax(1 - np.exp(self.beta - self.b + self.phi * np.minimum(1, 1/self.l))) \r\n return max(0, lb_totalflow)", "def alphabeta_maximize_play(self, game, legal_moves, depth, alpha, beta):\n highest_score, selected_move = (float('-inf'), (-1, -1))\n for move in legal_moves:\n score, _ = self.alphabeta(game.forecast_move(move), depth - 1, alpha, beta, False)\n if score > alpha:\n alpha = score\n highest_score, selected_move = score, move\n if alpha >= beta:\n break\n 
return (highest_score, selected_move)", "def _advance_to_next_stage(self, config_ids, losses):\n rank = nondominated_sort(losses)\n indices = np.array(range(len(losses)))\n keep_indices = np.array([], dtype=int)\n\n # nondominance rank-based selection\n i = 0\n while len(keep_indices) + sum(rank == i) <= self.num_configs[self.stage]:\n keep_indices = np.append(keep_indices, indices[rank == i])\n i += 1\n keep_indices = np.append(keep_indices, indices[rank == i])\n\n # hypervolume contribution-based selection\n #ys_r = losses[rank == i]\n #indices_r = indices[rank == i]\n #worst_point = np.max(losses, axis=0)\n #reference_point = np.maximum(\n # np.maximum(\n # 1.1 * worst_point, # case: value > 0\n # 0.9 * worst_point # case: value < 0\n # ),\n # np.full(len(worst_point), eps) # case: value = 0\n #)\n\n #S = []\n #contributions = []\n #for j in range(len(ys_r)):\n # contributions.append(hypervolume([ys_r[j]]).compute(reference_point))\n #while len(keep_indices) + 1 <= self.num_configs[self.stage]:\n # hv_S = 0\n # if len(S) > 0:\n # hv_S = hypervolume(S).compute(reference_point)\n # index = np.argmax(contributions)\n # contributions[index] = -1e9 # mark as already selected\n # for j in range(len(contributions)):\n # if j == index:\n # continue\n # p_q = np.max([ys_r[index], ys_r[j]], axis=0)\n # contributions[j] = contributions[j] - (hypervolume(S + [p_q]).compute(reference_point) - hv_S)\n # S = S + [ys_r[index]]\n # keep_indices = np.append(keep_indices, indices_r[index])\n\n return_stat = np.zeros((len(losses))).astype(bool)\n return_stat[keep_indices] = True\n return return_stat\n\n # ranks = np.argsort(np.argsort(losses))\n # return (ranks < self.num_configs[self.stage])", "def get_stages(self):\n current_stage = self.wf['root']\n\n while current_stage:\n yield current_stage\n next_stage = set()\n for n in current_stage:\n next_stage.update(self.wf['action'][n].get('next', set()))\n current_stage = next_stage", "def max_power_out_candidate_storage_rule(_m, g, y, s, t):\r\n\r\n return m.p_out[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def max_diffs(state):\n # your code here\n return best_action(state, pig_actions, Q_pig, win_diff)", "def auto_stage(vessel, max_autostage):\n if not vessel.available_thrust:\n active_stage = 99\n active_engines = [e for e in vessel.parts.engines if e.active]\n for engine in active_engines:\n active_stage = min(engine.part.stage, active_stage)\n\n if active_stage > max_autostage:\n old_thr = vessel.control.throttle\n vessel.control.throttle = 0\n\n while not vessel.available_thrust:\n time.sleep(0.5)\n vessel.control.activate_next_stage()\n\n vessel.control.throttle = old_thr", "def max_powerflow_rule(_m, l, y, s, t):\r\n\r\n return m.p_L[l, y, s, t] - m.POWERFLOW_MAX[l] <= 0", "def Es_case_B(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n #alp = alpha(z, x, beta2)\n alp = alpha_exact_case_B_brentq(z, x, beta)\n \n sin2a = sin(2*alp)\n cos2a = cos(2*alp) \n\n kap = 2*(alp - z)/beta\n #kap = sqrt(x**2 + 4*(1+x)*sin(alp)**2) # kappa for case B\n \n N1 = cos2a - (1+x)\n N2 = (1+x)*sin2a - beta*kap\n D = kap - beta*(1+x)*sin2a\n \n #print(N1, N2, D)\n \n # SC term with prefactor 1/(gamma*beta)^2 = 1/(gamma^2-1)\n NSC = (sin2a - beta*kap *cos2a)/ (gamma**2-1) \n \n return (N1*N2)/D**3\n #return (N1*N2 + NSC)/D**3", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return 
self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def solve(self, state, times):", "def is_advancing_to_next_stage(self):\n if self.game_stage == 1:\n return (self.die_a.current_value == \"1\" and self.die_b.current_value == \"2\" or\n self.die_a.current_value == \"2\" and self.die_b.current_value == \"1\")\n if self.game_stage == 2:\n return (self.die_a.current_value == \"ANGRY\" and self.die_b.current_value == \"4\" or\n self.die_a.current_value == \"4\" and self.die_b.current_value == \"ANGRY\")\n if self.game_stage == 3:\n return False", "def alpha(SA, CT, p):\n\n SA = np.maximum(SA, 0)\n\n xs = np.sqrt(sfac * SA + soffset)\n ys = CT * 0.025\n z = p * 1e-4\n\n v_CT_part = (a000\n + xs * (a100 + xs * (a200 + xs * (a300 + xs * (a400 + a500 * xs))))\n + ys * (a010 + xs * (a110 + xs * (a210 + xs * (a310 + a410 * xs)))\n + ys * (a020 + xs * (a120 + xs * (a220 + a320 * xs))\n + ys * (a030 + xs * (a130 + a230 * xs)\n + ys * (a040 + a140 * xs + a050 * ys))))\n + z * (a001 + xs * (a101 + xs * (a201 + xs * (a301 + a401 * xs)))\n + ys * (a011 + xs * (a111 + xs * (a211 + a311 * xs))\n + ys * (a021 + xs * (a121 + a221 * xs)\n + ys * (a031 + a131 * xs + a041 * ys)))\n + z * (a002 + xs * (a102 + xs * (a202 + a302 * xs))\n + ys * (a012 + xs * (a112 + a212 * xs)\n + ys * (a022 + a122 * xs + a032 * ys))\n + z * (a003 + a103 * xs + a013 * ys + a004 * z))))\n\n specific_volume = (v000\n + xs * (v100 + xs * (v200 + xs * (v300 + xs * (v400 + xs * (v500\n + xs * v600)))))\n + ys * (v010\n + xs * (v110 + xs * (v210 + xs * (v310 + xs * (v410 + xs * v510))))\n + ys * (v020 + xs * (v120 + xs * (v220 + xs * (v320 + xs * v420)))\n + ys * (v030 + xs * (v130 + xs * (v230 + xs * v330))\n + ys * (v040 + xs * (v140 + xs * v240)\n + ys * (v050 + xs * v150 + ys * v060)))))\n + z * (v001\n + xs * (v101 + xs * (v201 + xs * (v301 + xs * (v401 + xs * v501))))\n + ys * (v011 + xs * (v111 + xs * (v211 + xs * (v311 + xs * v411)))\n + ys * (v021 + xs * (v121 + xs * (v221 + xs * v321))\n + ys * (v031 + xs * (v131 + xs * v231)\n + ys * (v041 + xs * v141 + ys * v051))))\n + z * (v002\n + xs * (v102 + xs * (v202 + xs * (v302 + xs * v402)))\n + ys * (v012 + xs * (v112 + xs * (v212 + xs * v312))\n + ys * (v022 + xs * (v122 + xs * v222)\n + ys * (v032 + xs * v132 + ys * v042)))\n + z * (v003\n + xs * (v103 + xs * v203)\n + ys * (v013 + xs * v113 + ys * v023)\n + z * (v004 + xs * v104 + ys * v014\n + z * (v005 + z * v006))))))\n\n return 0.025 * v_CT_part / specific_volume", "def get_optimal_submesh_assignments(\n best_n_stages, F_argmin, n_devices, n_ops, submesh_sizes\n):\n current_s = best_n_stages\n current_layer = 0\n current_devices = n_devices\n\n optimal_layer_submesh_assignments = []\n while current_s > 0 and current_layer < n_ops and current_devices > 0:\n next_start_layer, submesh_shape_idx, sharding_config_idx = F_argmin[\n current_s, current_layer, current_devices\n ]\n assert next_start_layer != -1 and current_devices != -1\n optimal_layer_submesh_assignments.append(\n ((current_layer, next_start_layer), submesh_shape_idx, sharding_config_idx)\n )\n current_s -= 1\n current_layer = next_start_layer\n current_devices -= submesh_sizes[submesh_shape_idx]\n\n assert current_s == 0 and current_layer == n_ops and current_devices == 0\n\n return optimal_layer_submesh_assignments", "def main():\n S = input()\n ans = editorial(S)\n ans2 = 
dp_simple(S)\n ans3 = cumsum(S)\n\n assert ans == ans2 == ans3\n if len(S) <= 5:\n ans4 = TLE(S)\n assert ans == ans4\n\n print(ans)", "def stage_order(self,tol=1.e-14):\n from sympy import simplify\n simp_array = np.vectorize(sympy.simplify)\n k,B,C=0,0.,0.\n while np.all(abs(B)<tol) and np.all(abs(C)<tol):\n k=k+1\n B=simplify(np.dot(self.b,self.c**(k-1)))-1./k\n C=simp_array(np.dot(self.A,self.c**(k-1))-self.c**k/k)\n return k-1", "def solveOneStep(self):\n ### Student code goes here\n if self.first_step == False:\n self.first_step = True\n if self.solveOneStep():\n return True\n if self.queue:\n self.gm_init()\n ele = self.queue.get()\n #print (len(ele))\n state = ele[0]\n premoves = ele[1]\n\n for m in premoves:\n self.gm.makeMove(m)\n if state.state == self.victoryCondition:\n return True\n self.visited[state] = True\n print(\"CURRENTSTATE:\")\n print(self.gm.getGameState())\n print(\"*******\")\n moves = self.gm.getMovables()\n for m in moves:\n self.gm.makeMove(m)\n if (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n self.gm.reverseMove(m)\n continue\n self.visited[GameState(self.gm.getGameState(), 0, None)] = True\n new_pmv = [i for i in premoves]\n new_pmv.append(m)\n next_state = GameState(self.gm.getGameState(), state.depth+1, m)\n next_state.parent = state\n state.children.append(next_state)\n self.queue.put([next_state, new_pmv])\n self.gm.reverseMove(m)\n self.currentState = state\n\n #for i in range(len(premoves)-1, -1, -1):\n # mv = premoves[i]\n # self.gm.reverseMove(mv)\n return False", "def part_2(rules: Rules) -> int:\n\n rules_with_myself = add_myself(rules)\n happiness, _ = max(generate_arrangements(rules_with_myself))\n print(f\"part 2: optimal arrangement (including myself) brings {happiness} happiness\")\n return happiness", "def calibration(N,kb,T,Ekinv,V):\n lamb = np.sqrt((N-1)*3*kb*T/(Ekinv*2))\n \n if lamb < 0.9999:\n V = lamb*V\n elif lamb>1.0001:\n V = lamb*V\n \n return V", "def _max_value(\r\n self,\r\n state: TwoPlayerGameState,\r\n alpha: float,\r\n beta: float,\r\n depth: int,\r\n ) -> float:\r\n if state.end_of_game or depth == 0:\r\n phi = self.heuristic.evaluate(state)\r\n else:\r\n phi = -np.inf\r\n\r\n successors = self.generate_successors(state)\r\n for successor in successors:\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, phi))\r\n\r\n successor_beta = self._min_value(\r\n successor, alpha, beta, depth - 1\r\n )\r\n\r\n # Maximizing the min value\r\n if (successor_beta > phi):\r\n phi = successor_beta\r\n\r\n # Pruning\r\n if phi >= beta:\r\n return phi\r\n\r\n alpha = max(alpha, phi)\r\n\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, beta))\r\n\r\n return phi", "def greedy(self, state, timestep, epsilon=0):\n\n counts = np.bincount(self.call_locs, minlength=self.num_nodes)\n # print(self.lengths)\n # print(counts)\n score = self.lengths @ counts\n action = []\n for _ in range(self.num_ambulance):\n node = np.argmin(score)\n action.append(node)\n score[node] = 99999999\n return action", "def rmax(env, gamma, m, R_max, epsilon, num_episodes, max_step = 6):\n\n Q = np.ones((env.nS, env.nA)) * R_max / (1 - gamma)\n R = np.zeros((env.nS, env.nA))\n nSA = np.zeros((env.nS, env.nA))\n nSASP = np.zeros((env.nS, env.nA, env.nS))\n ########################################################\n # YOUR CODE HERE #\n ########################################################\n\n # Generate episodes\n average_scores = []\n accum = 
0.0\n term = int(np.log(1 / (epsilon * (1 - gamma))) / (1 - gamma))\n for i in xrange(num_episodes):\n S = env.reset()\n done = False\n episode_reward = 0.0\n n_steps = 0\n\n while not done:\n\n if n_steps >= max_step:\n break\n\n A = np.argmax([Q[S,a] for a in range(env.nA)])\n\n # Make an action\n nextS, reward, done, _ = env.step(A)\n episode_reward += reward\n\n # R-Max\n if nSA[S, A] < m:\n nSA[S, A] += 1\n R[S, A] += reward\n nSASP[S, A, nextS] += 1\n\n if nSA[S, A] == m:\n for j in range(term):\n for S_bar in range(env.nS):\n for A_bar in range(env.nA):\n if nSA[S_bar, A_bar] >= m:\n N = float(nSA[S_bar, A_bar])\n T_hat = nSASP[S_bar, A_bar, :] / N\n R_hat = R[S_bar, A_bar] / N\n Q[S_bar, A_bar] = R_hat\n Q[S_bar, A_bar] += gamma * np.sum(T_hat * np.max(Q, axis=1))\n\n\n # Update Q-value\n S = nextS\n n_steps += 1\n\n accum += episode_reward\n average_scores.append(accum/(i+1))\n\n plt.plot(average_scores[:10000], label=\"m=%d\"%(m))\n\n ########################################################\n # END YOUR CODE #\n ########################################################\n return Q", "def convertTumorStage(tumor_stage):\n stages= {0:0,1:1,2:3,3:5}\n sub_stages = {'a':0,'b':1,'c':2}\n if tumor_stage == \"not reported\":\n return None\n else:\n stage = tumor_stage.split(' ')[1]\n if stage.count('v') > 0:\n return 8\n else:\n count = stage.count('i')\n count = stages[count]\n if stage[-1] in ['a','b','c']:\n return count + sub_stages[stage[-1]]\n else:\n return count", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # For this problem we will be reusing the majority of our work from question 2, but we will be\n # implementing alpha-beta pruning on top of our existing minimax infrastructure\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n\n def maximizer(curState, agentIndex, alpha, beta, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight > beta:\n return weight\n if alpha < weight:\n alpha = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, alpha, beta, depth):\n ghostActions = curState.getLegalActions(agentIndex)\n weight = 999999999 # Worst case starting value to be changed in the code\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, alpha, beta, depth+1)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if 
weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n endWeight = -999999999\n alpha = -999999999\n beta = 999999999\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n endWeight = minimizer(tempState, 1, alpha, beta, 0,)\n count[x] = endWeight\n if alpha < endWeight:\n alpha = endWeight\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def alpha_beta_search(self, game_state, depth):\r\n alpha = float(\"-inf\")\r\n beta = float(\"inf\")\r\n best_score = float(\"-inf\")\r\n best_move = None\r\n for a in game_state.actions():\r\n vv = self.min_value(game_state.result(a), alpha, beta, depth)\r\n alpha = max(alpha, vv)\r\n if vv > best_score:\r\n best_score = vv\r\n best_move = a\r\n return best_move", "def solve(m):\n\t\n #with the assumption that at least one terminal state is given:\n if(len(m)==2 or len(m)==1): return [1,1]\n \n #Normalizing the in. matrix and identifying the trans./abs. states:\n m = normalizeProbabilityMatrix(m)\n t = getTransientStates(m)\n a = getAbsorbingStates(m)\n\t\n if len(a) >0:\n print( str(len(a)) + \" absorbing state\" + (\"\" if len(a)<=1 else \"s\" ))\n else:\n print(\"No absorbing state detected\")\n return\n \n #Getting the matrices Q and R as in the canonical form:\n Q = getQ(m,t)\n R = getR(m,t,a)\n I = getIdentity(len(Q))\n I_Q = subtractMatrices(I, Q)\n \n #Getting the fundamental matrix\n N = invertMatrix(I_Q)\n F = multiplyMatrices(N,R)\n \n #packing the result with a common denominator:\n gcd = getGCD(F[0]).denominator\n res=[]\n sum = 0\n for r in F[0]:\n val = int(r.numerator*(gcd/r.denominator))\n sum+=val\n res.append(val)\n res.append(sum) \n return res", "def cal_step(net_a, net_b, prior_a,prior_b, posterior_a, posterior_b,origins,destinations,od_flow,od_flow_b,v_b,timecost_b,_ulabel_lane,per_b):\n lb = 0.0000000001\n ub = 0.99\n step = (lb + ub) / 2.0\n while abs(div(net_a, net_b, prior_a,prior_b, posterior_a, posterior_b,origins,destinations,od_flow,od_flow_b,v_b,step,timecost_b,_ulabel_lane,per_b)) >= 0.01 and abs(ub-lb) > 0.0001:\n if div(net_a, net_b, prior_a,prior_b, posterior_a, posterior_b,origins,destinations,od_flow,od_flow_b,v_b,step,timecost_b,_ulabel_lane,per_b) * div(net_a, net_b, prior_a,prior_b, posterior_a, posterior_b,origins,destinations,od_flow,od_flow_b,v_b,ub,timecost_b,_ulabel_lane,per_b) > 0:\n ub = step\n else:\n lb = step\n step = (lb + ub) / 2.0\n return step", "def forwardVariableGeneration(self):\n self.alpha = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialistation\n self.alpha[0,0] = 1.0\n self.alpha[1:,0] = 0.0\n self.alpha[0,1:] = 0.0\n\n # main recursion\n for t in range(1, self.T+1):\n for j in range(1, self.noOfEmmittingStates+1):\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k, t-1] * self.transitionMatrix[k, j-1])\n self.alpha[j, t] = self.b[j-1, t-1] * partialSum\n # since must end in final state, last alpha for 
states with zero transition\n # prob to last state must be zero?\n for row in range(self.transitionMatrix.shape[0]):\n if self.transitionMatrix[row,-1] == 0.0:\n self.alpha[row,-1] = 0.0\n # fwd prob variable for final state at 'last' timestep gets bumped into the\n # final column to save having a needless column\n partialSum = 0\n for k in range(self.noOfEmmittingStates+1):\n partialSum += (self.alpha[k,-1] * self.transitionMatrix[k,-1])\n self.alpha[-1,-1] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]", "def max_diffs(state):\n return best_action(state, pig_actions, Q_pig, win_diff)", "def beta(self,state,time,observation):\n trans = self.transition_map\n em = self.emission_map\n states = self.states\n O = observation\n\n @memoize\n def beta_helper(i,t,O):\n #print('State: ' + str(i))\n #print('Time: ' + str(t))\n #assert that the world is safe\n assert (t >= 0)\n assert (t <= len(O))\n #grab the base case\n if t == len(O):\n return 1\n #recursive application, equation 9.11\n else:\n if O[t] == ',':\n print(\"HERE\")\n import sys\n sys.exit(1)\n return sum(beta_helper(j,t+1,O)*trans[i][j]*em[i][O[t]] for j in states)\n\n return beta_helper(state,time,O)", "def boundary_op_n(v):\r\n h = list(v.dic.keys())[0]\r\n p = len(h) - 1\r\n s = P_chains([],[])\r\n if (p != 0) and (isinstance(h, str) != True) and (isinstance(h, frozenset) != True) and (isinstance(h, ImmutableMatrix) != True):\r\n if (is_int(list(v.dic.keys())) == True):\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n return s\r\n else:\r\n aux = P_chains([],[])\r\n D = {}\r\n ct = 0\r\n st = []\r\n for u in v.dic.keys():\r\n for x in u:\r\n if x not in st:\r\n st.append(x)\r\n for i in st:\r\n D[tuple([ct])] = i\r\n ct = ct + 1\r\n for u in v.dic.keys():\r\n w2 = []\r\n for x in u:\r\n for y in list(D.keys()):\r\n if (x == D[y]):\r\n w2.append(y)\r\n aux = aux + P_chains([tuple(w2)],[v.dic[u]]) \r\n v = aux\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n s2 = P_chains([],[])\r\n for u in s.dic.keys():\r\n w2=[]\r\n for i in u:\r\n w2.append(D[i])\r\n s2 = s2 + P_chains([tuple(w2)],[s.dic[u]])\r\n \r\n return s2\r\n else:\r\n return s", "def next_move(self, cur_state):\n\n alpha, final_state, min_level, action_took = self.alpha_beta(cur_state, 2, 0, -math.inf, math.inf, math.inf)\n #print(\"-----------------------------------------\")\n #print(\"value = \"+str(alpha)+\", min_level = \"+str(min_level))\n #print(\"previous: top=\"+str(cur_state.top)+\", bottom=\"+str(cur_state.bottom)+\", left=\"+str(cur_state.left)+\", right=\"+str(cur_state.right))\n 
#print(final_state.pre_state)\n return action_took", "def getaplhabeta(self, gameState, agent_ind, depth, agent_num, alpha, beta):\n\n # if not the last agent\n next_agent_ind = agent_ind + 1\n next_depth = depth\n\n # if is the last agent\n if agent_ind == agent_num - 1: # the last agent. add another depth\n next_depth = depth + 1\n next_agent_ind = 0\n\n if depth == self.depth or gameState.isWin() or gameState.isLose():\n return [None, self.evaluationFunction(gameState)]\n # agent_ind = currLevel % agent_num\n\n if agent_ind == 0: # pacman, from negative infinity to the maximum score\n score = -float('inf')\n else: # ghost, from positive infinity to xthe minimum score\n score = float('inf')\n\n all_actions = gameState.getLegalActions(agent_ind)\n\n # print(\"all_actions\", all_actions)\n bestAction = None\n\n if agent_ind == 0: # max\n for action in all_actions:\n succState = gameState.generateSuccessor(agent_ind, action)\n result = self.getaplhabeta(succState, next_agent_ind, next_depth, agent_num, alpha, beta)\n child_score = result[1]\n alpha = max(alpha, child_score)\n if score < child_score:\n # print(\"action,\", action)\n score = child_score\n bestAction = action\n if beta <= alpha:\n break\n return [bestAction, alpha]\n else: # min\n for action in all_actions:\n succState = gameState.generateSuccessor(agent_ind, action)\n result = self.getaplhabeta(succState, next_agent_ind, next_depth, agent_num, alpha, beta)\n child_score = result[1]\n beta = min(beta, child_score)\n if score > child_score:\n score = child_score\n bestAction = action\n if beta <= alpha:\n break\n return [bestAction, beta]\n\n return [bestAction, score]", "def stages(self):\n return self._stages", "def stages(self):\n return self._stages", "def can_play_stage(stamina, free_slots):\n if free_slots >= 5 and stamina > 0:\n return True\n return False", "def phase_Neptune(alpha):\n phase = 10.**(-0.4*(7.944e-3*alpha + 9.617e-5*alpha**2.))\n return phase", "def act_func_part3_bej_sto_v1(action_raw, action_raw_idx, raw_state_limits, stptLmt, ob_this_raw, logger, is_show_debug):\n CLG_DMD_IDX = 14;\n CHILLER1_CAP = 1294100 # W\n CHILLER2_CAP = 1294100 # W\n CHILLER3_CAP = 685700 # W\n\n act_choice_0 = [1,0,0,0,0];\n act_choice_1 = [0,1,0,0,0];\n act_choice_2 = [0,0,1,0,0];\n act_choice_3 = [0,0,0,1,0];\n act_choice_4 = [0,0,0,0,1];\n act_num = 5;\n act_choices = [act_choice_0, act_choice_1, act_choice_2, \n act_choice_3, act_choice_4]; \n act_0_max_cap = CHILLER3_CAP; # 1 small chiller\n act_1_max_cap = CHILLER1_CAP; # 1 big chiller\n act_2_max_cap = CHILLER1_CAP + CHILLER3_CAP; # 1 small 1 big\n act_3_max_cap = CHILLER1_CAP + CHILLER2_CAP; # 2 bigs\n act_4_max_cap = CHILLER1_CAP + CHILLER2_CAP + CHILLER3_CAP; # all chillers\n clg_demand = ob_this_raw[CLG_DMD_IDX];\n org_action_raw = copy.deepcopy(action_raw);\n org_action_raw_idx = action_raw_idx;\n # Check the current cooling demand in which range\n if clg_demand <= act_0_max_cap:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_1_max_cap >= clg_demand > act_0_max_cap:\n if org_action_raw_idx < 1:\n action_ret_idx = np.random.randint(1, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_2_max_cap >= clg_demand > act_1_max_cap:\n if org_action_raw_idx < 2:\n action_ret_idx = np.random.randint(2, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_3_max_cap 
>= clg_demand > act_2_max_cap:\n if org_action_raw_idx < 3:\n action_ret_idx = np.random.randint(3, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n elif act_4_max_cap >= clg_demand > act_3_max_cap:\n if org_action_raw_idx < 4:\n action_ret_idx = np.random.randint(4, act_num);\n action_ret = act_choices[action_ret_idx];\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = org_action_raw;\n else:\n action_ret_idx = org_action_raw_idx;\n action_ret = act_choice_4;\n\n if action_raw_idx != action_ret_idx:\n if is_show_debug:\n logger.debug('Action function: raw action %s has been changed to %s for '\n 'the demand %s W.'%(action_raw_idx, action_ret_idx, clg_demand));\n return (action_ret, action_ret_idx);", "def _compute_p_value(sequence_size: int, max_excursion: int) -> float:\r\n # Execute first sum\r\n sum_a: float = 0.0\r\n start_k: int = int(math.floor((((float(-sequence_size) / max_excursion) + 1.0) / 4.0)))\r\n end_k: int = int(math.floor((((float(sequence_size) / max_excursion) - 1.0) / 4.0)))\r\n for k in range(start_k, end_k + 1):\r\n c: float = 0.5 * math.erfc(-(((4.0 * k) + 1.0) * max_excursion) / math.sqrt(sequence_size) * math.sqrt(0.5))\r\n d: float = 0.5 * math.erfc(-(((4.0 * k) - 1.0) * max_excursion) / math.sqrt(sequence_size) * math.sqrt(0.5))\r\n sum_a = sum_a + c - d\r\n # Execute second sum\r\n sum_b: float = 0.0\r\n start_k = int(math.floor((((float(-sequence_size) / max_excursion) - 3.0) / 4.0)))\r\n end_k = int(math.floor((((float(sequence_size) / max_excursion) - 1.0) / 4.0)))\r\n for k in range(start_k, end_k + 1):\r\n c: float = 0.5 * math.erfc(-(((4.0 * k) + 3.0) * max_excursion) / math.sqrt(sequence_size) * math.sqrt(0.5))\r\n d: float = 0.5 * math.erfc(-(((4.0 * k) + 1.0) * max_excursion) / math.sqrt(sequence_size) * math.sqrt(0.5))\r\n sum_b = sum_b + c - d\r\n # Return value\r\n return 1.0 - sum_a + sum_b", "def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. 
- avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x", "def stages(self, stages):\n if stages is None:\n self._stages = None\n else:\n self._stages = stages if isinstance(stages, list) else [stages] * len(self.pidevice.allaxes)\n debug('ControllerStartup.stages = %s', itemstostr(self._stages))", "def evaulate_policy(w, env, number_of_episodes=2):\n\n results = []\n # for e in range(number_of_episodes):\n s_old = env.reset()\n t = 0\n done = False\n while not done:\n # Choose action\n action = None\n if np.inner(w, s_old) > 0:\n action = 1\n else:\n action = 0\n # Take action\n s_new, r, done, _ = env.step(action)\n # env.render()\n\n # Update\n s_old = s_new\n\n t += 1\n if t>200:\n break\n # results.append(t)\n # print(t)\n return t", "def solveOneStep(self):\n ### Student code goes here\n if (self.currentState.state == self.victoryCondition) or (self.currentState not in self.visited):\n self.visited[self.currentState] = True\n win_or_not = self.currentState.state == self.victoryCondition\n return win_or_not\n\n if not self.currentState.nextChildToVisit: \n its = 0\n for movable in self.gm.getMovables():\n its += 1\n # time test\n # too long \n if its == \"too long\":\n return \"too long\"\n #make every move in movable\n self.gm.makeMove(movable)\n new = self.gm.getGameState()\n new_gs = GameState(new, self.currentState.depth+1, movable)\n \n if new_gs not in self.visited:\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.gm.reverseMove(movable) \n \n num_children = len(self.currentState.children)\n if self.currentState.nextChildToVisit < num_children:\n new = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.makeMove(new.requiredMovable)\n self.currentState = new\n #recurse\n return self.solveOneStep()\n else:\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n #recurse\n return self.solveOneStep()", "def phase_Jupiter_1(alpha):\n phase = 10.**(-0.4*(- 3.7e-04*alpha + 6.16e-04*alpha**2.))\n return phase", "def Ballie_PSW_test(n, max_trivial_trials=100):\n for i in range(max_trivial_trials):\n if primes[i] == n:\n return True\n if n % primes[i] == 0:\n return False\n if primes[i] ** 2 >= n:\n return True\n if not fermat_strong_test(n, 2):\n return False\n if not lucas_selfridge_test(n):\n return False\n return True", "def closure_phase_operator(self, redundant=False):\n N = self.nbap\n q = (N - 1) * (N - 2) // 2 if not redundant else N * (N - 1) * (N - 2) // 6\n p = self.nbuv\n base_apertures = [0] if not redundant else list(range(N))\n CPO = np.zeros((q, p))\n CPO_index = 0\n for i in base_apertures:\n for j in range(i+1, N):\n k = np.arange(j + 1, N)\n k = np.delete(k, np.where(k == i))\n if k.size == 0:\n break\n # find 
baseline indices (b1,b2,b3) from triangle vertices (i,j,k)\n b1 = np.nonzero((self.BLM[:, i] != 0) & (self.BLM[:, j] != 0))[0][0]\n b1 = np.repeat(b1, k.size)\n # b2k and b3k keep track of which k-vertice is associated with the baseline b2 and b3 respectively\n b2, b2k = np.nonzero((self.BLM[:, k] != 0) & (self.BLM[:, j] != 0)[:, np.newaxis])\n b3, b3k = np.nonzero((self.BLM[:, k] != 0) & (self.BLM[:, i] != 0)[:, np.newaxis])\n diag = np.arange(CPO_index, CPO_index + k.size)\n # signs are retrieved from Baseline Map in order to satisfy closure relation: (i - j) + (j - k) + (k - i)\n CPO[diag, b1] += self.BLM[b1, i]\n CPO[diag, b2] += self.BLM[b2, j]\n CPO[diag, b3] += self.BLM[b3, k[b3k]]\n CPO_index += k.size\n return CPO", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_Player = True):\n def max_value(self, game, depth, alpha, beta):\n \"\"\"This is helper function for alpha-beta prunnig on minimax\n Min_value (self, game, depth, alpha, beta)\n\n Parameters:\n game: game state\n depth: search depth\n alpha: search upper limit\n beta: search lower limit\n\n Find maximum score of each game state corresponding to its legal moves\n Set new alpha (search upper limit) if find score higher than current limit\n Return score of that state when search complete.\n \"\"\"\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # Get legal moves\n valid_moves = game.get_legal_moves()\n # Best possible score -> initiated at inf, the highest score possible\n best_value = float(\"-inf\")\n \n # Terminal State:\n # When search reaches search limit or no legal moves left\n # Return score of terminal state\n if (depth == 0) or (not valid_moves):\n return self.score(game, self)\n \n # Search each move in legal moves\n for move in valid_moves:\n\n # Update best possible value with current best or search value \n best_value = max(best_value, min_value(self, game.forecast_move(move), depth-1, alpha, beta))\n \n # Update beta when best bossible value is equal or higher than beta\n if (best_value >= beta):\n return best_value\n\n # Update alpha if best possible value is higher than alpha\n alpha = max(best_value, alpha)\n \n # Return best value (in this case max value) \n return best_value\n \n def min_value(self, game, depth, alpha, beta):\n \"\"\"This is helper function for alpha-beta prunnig on minimax\n Min_value (self, game, depth, alpha, beta)\n\n Parameters:\n game: game state\n depth: search depth\n alpha: search upper limit\n beta: search lower limit\n\n Find minimum score of each game state corresponding to its legal moves\n Set new beta (search lower limit) if find score lower than current limit\n Return score of that state when search complete.\n \"\"\"\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n \n # Get legal moves\n valid_moves = game.get_legal_moves()\n # Best possible score -> initiated at inf, the highest score possible\n best_value = float(\"inf\")\n \n # Terminal State:\n # When search reaches search limit or no legal moves left\n # Return score of terminal state\n if (depth == 0) or (not valid_moves):\n return self.score(game, self)\n \n # Search each move in legal moves\n for move in valid_moves:\n \n # Update best possible value with current best or search value \n best_value = min(best_value, max_value(self, game.forecast_move(move), depth-1, alpha, beta))\n \n # Update beta when best bossible value is equal or lower than alpha\n if (best_value <= alpha):\n 
return best_value\n \n # Update alpha if best possible value is lower than beta\n beta = min(best_value, beta)\n \n return best_value\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # Main MiniMax Function\n # Get legal moves\n valid_moves = game.get_legal_moves()\n\n # Best possible move -> initiated at (-1,-1)\n # Best possible score -> initiated at -inf, the lowest score possible\n best_score = float(\"-inf\")\n best_move = (-1, -1)\n\n # Terminal State:\n # When no legal moves left return (-1, -1) move to forfeit\n if (depth == 0) or (not valid_moves):\n return (-1, -1)\n \n # Search best move from each move in legal moves\n # Using minimax by first call min_value (helper function)\n # While searching, if any move return better score than current best\n # core, set that move and corresponding score as new target also set new upper limit with maximum score so far\n # Search ends when score is higher than beta\n for move in valid_moves:\n score = min_value(self, game.forecast_move(move), depth -1, alpha, beta)\n if (score > best_score):\n best_score = score\n best_move = move\n alpha = max(alpha, score)\n if best_score >= beta:\n return best_move\n\n return best_move", "def minimum_spanning_arborescence(sol):", "def run_experiment(n, m, start=1, testing=False):\n #From the given m, infer all the life stages:\n life_stages = range(1,m+1)\n \n #Set the starting condition in a dictionary:\n rabbit_dict = {1: start} #in life_stage 1 there is now 1 rabbit pair\n \n #Fill up the dictionary with 0 for each other stage\n for stage in life_stages:\n if stage != 1:\n rabbit_dict[stage] = 0\n else:\n pass\n\n aging_dict = {} #create a copy to move aging rabbits outside the original dictionary\n #This prevents errors in changing the dictionary while reading it\n \n #Now start the calculations for the experiment:\n for month in range(1, n):\n if testing:\n print(\"It is now month: %i\" % month)\n \n #Three things happen each month:\n # 1. rabbits in life stages > 1 reproduce\n newborns = 0\n for stage in rabbit_dict.keys():\n if stage > 1:\n #past stage 1 are adult, which reproduce:\n newborns += rabbit_dict[stage]\n else:\n #else they are juveniles and don't reproduce\n pass\n \n if testing:\n print(\"%i new rabbit pairs are born\" % newborns)\n\n # 2. rabbits in the last life stage (m) die\n if testing:\n print(\"%i rabbit pairs die (of age)\" % rabbit_dict[m])\n \n rabbit_dict[m] = 0\n \n # 3. 
all remaining rabbits move to the next stage\n for stage in rabbit_dict.keys():\n if stage < m:\n #Only rabbits of stages before the latest age\n aging_dict[stage + 1] = rabbit_dict[stage]\n else:\n #Others don't exist\n pass \n\n rabbit_dict = aging_dict.copy()\n #Only add newborns to the dictionary after all the \n # already existing rabbits have grown to the next stage.\n rabbit_dict[1] = newborns\n \n if testing:\n print(\"At the end of the month, we have this age distribution:\\n%s\\n\" % rabbit_dict)\n print(\"\\n---\\nFinally, after %i months, we end up with %i rabbit pairs\" % (n, sum(rabbit_dict.values())))\n \n return(sum(rabbit_dict.values()))", "def V_belief(s, n):\n\n if n == 0:\n return 0, ''\n else:\n v = {}\n sx_u = {}\n for ui in U: # for each possible action\n sx = B[ui].dot(s) # next belief state\n r = lnc.dot(sx) # expected immediate reward\n v[ui] = r + V_belief(sx, n-1)[0]\n sx_u[ui] = sx\n\n vmax = max(v.values())\n umax = max(v, key=v.get)\n sxmax = sx_u[umax]\n\n return vmax, umax, sxmax", "def get_action_of_alphaBeta(self, game_state: Game):\n self._node = game_state\n actions = []\n func = lambda x: 2.5 * surface_heuristic(game_state, x) + 0.5 * row_col_completeness_heuristic(game_state, x)\n for block in game_state.current_blocks:\n ac = game_state.get_legal_actions(block)\n ac.sort(key=func)\n actions += ac if len(ac) < 10 else ac[:10]\n\n # Initializes with the maximum values for alpha, beta\n alpha, beta = -float('inf'), float('inf')\n best = actions[0]\n possible_move = game_state.generate_successor(best, True)\n # Completes the last 2 moves by the helper agent\n complet_best = self.helper.greedy_search(possible_move)\n\n for action in actions:\n possible_move = game_state.generate_successor(action, True)\n moves = self.helper.greedy_search(possible_move)\n for act in moves:\n possible_move = possible_move.generate_successor(act, True)\n action_list = [action] + moves\n n_alpha = self.AlphaBetaPruning(possible_move, 0, alpha, beta, False, action_list) - possible_move.points\n if n_alpha > alpha:\n best = action\n complet_best = moves\n alpha = n_alpha\n if alpha >= beta:\n break\n best = [best] + complet_best\n self._threes_lists = None\n return best", "def solveProblem027():\n primes = getPrimeRange(0, 5000000)\n biggestConseqPrimes = 0\n coefficients = (0, 0)\n # b has to be a prime number or else n = 0 doesn't come out prime\n bRange = getPrimeRange(0, 1000)\n for a in range(-999, 1000, 1):\n print(a)\n for b in bRange:\n n = 0\n numConseqPrimes = 0\n while True:\n if ((n**2) + (a * n) + b) in primes:\n numConseqPrimes += 1\n n += 1\n else:\n break\n if n > biggestConseqPrimes:\n biggestConseqPrimes = n\n coefficients = (a, b)\n print(\"The coefficients that produce the largest number of consecutive\" \\\n \"primes are a = %d b = %d\" % (coefficients))\n print(\"The product of the coefficients is %d\" % (coefficients[a] * \\\n coefficients[b])\n\nif __name__ == \"__main__\":\n solveProblem027()", "def _transition_probability(self, s, a, s1):\n unreachable_states = [4, # F with prod_score == 4\n 5] # M with prod_score == 0\n\n if s1 in unreachable_states:\n return 0\n else:\n return 1 / (self.n_states - len(unreachable_states))", "def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, 
ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])", "def powerflow_rule(_m, l, y, s, t):\r\n\r\n return (- m.sigma_27[l, y, s, t] + m.sigma_28[l, y, s, t]\r\n + (m.INCIDENCE_MATRIX[l, self.g(l)] * m.lamb[self.g(l), y, s, t])\r\n + (m.INCIDENCE_MATRIX[l, self.h(l)] * m.lamb[self.h(l), y, s, t])\r\n == 0)", "def advance_stage(self):\n if self.stage == 0:\n self.curr_i = self.I\n elif self.stage == 1:\n self.curr_d = self.D\n elif self.stage == 2:\n self.curr_r == self.R", "def FigA7(case):\n \n #set the parameter, arrays\n \n n_array=np.array([1,2,3])\n\n #set the result arrays\n if case==0:\n class_number=5\n elif case==1:\n class_number=6\n fate=np.zeros([class_number])#number of evolutionary fate\n fate_matrix=np.zeros([np.size(n_array),np.size(fate)])\n \n time=np.linspace(0,100000, 1000000)\n loop=10**6\n \"\"\"\n 0 Co and/or Ch cannot survive in mono-culture\n 1 Co cannot invade\n 2 Only equilibrium of exclusion is stable\n 3 Only equilibrium of coexistence is stable\n 4 Two equilibria are UNstable\n 5 two Equilibrium are stable (which may occur only when sCO vs rCh)\n \"\"\"\n for tri in range(np.size(n_array)):\n counter=0\n n=n_array[tri]\n print(str(\"Hill coefficient is %d\" %(n)))\n fate=np.zeros([class_number])#number of evolutionary fate should be reset\n if case==0 or case==1:\n fname=str('parameter-sweep-MC-n%d-case%d' %(n, case))\n else:\n print(\"Error in case\")\n return 1\n \n for i in range(loop):\n if(i+1)%10000==0:\n print(i+1)\n Ks,cd,T0, alpha,=np.random.uniform(0,1,4)\n Kr,cr=np.random.uniform([Ks,0],[1,1],2)#Kr>Ks and cr.cd\n #check whether r is positive or not\n if case==0:\n r1=rmax*(1-cr-cd)#rCO\n r2=rmax#sCH\n W0Co=r1-dmax*T0**n/(T0**n+Kr**n)-alpha#initial growth of Cooperator\n W0Ch=r2-dmax*T0**n/(T0**n+Ks**n)-alpha#initial growth of Cheater\n elif case==1:\n r1=rmax*(1-cd)#sCo\n r2=rmax*(1-cr)#rCh\n W0Co=r1-dmax*T0**n/(T0**n+Ks**n)-alpha\n W0Ch=r2-dmax*T0**n/(T0**n+Kr**n)-alpha\n stab_e=0#initialize the falgs of stability\n stab_c=0\n if W0Co<0 or W0Ch<0:\n fate[0]+=1\n res=0\n else:\n #succeed in mono-culture \n init=np.array([T0,10**(-6)])\n if case==0: \n solCo=odeint(DyCoop, init, time, args=(T0, r1, Kr, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Ks, alpha, n))\n x2s=solCh[-1,1]\n else:\n solCo=odeint(DyCoop, init, time, args=(T0, r1, Ks, alpha, n))\n Ts=solCo[-1,0]\n #x1s=solCo[-1,1]\n solCh=odeint(DyCheat, init, time, args=(T0, r2, Kr, alpha, n))\n x2s=solCh[-1,1]\n \n #Evolutionary dynamics \n if case==0:\n K=Kr\n else:\n K=Ks\n if r1*(1-x2s)-dmax*T0**n/(T0**n+K**n)<alpha:\n #Co cannot invade\n fate[1]+=1\n res=1\n else:\n #Co can invade\n #calculate Tdagger Td and check whether coexist or exclude\n if case==0:\n #rCo vs sCh\n #in this case, at most one equilbrium is stable\n tau=Quad(case,alpha,cr+cd,0,Kr, Ks, n)\n Td=tau**(1/n)\n if Td<Ts:\n #Co exclude Ch\n fate[2]+=1\n res=2\n else:\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #unstable coexistence nor exclusion\n fate[4]+=1\n res=4\n print(Td, x1d, x2d)\n else:\n #sCo vs rCh\n # in this case two equilibria can be stable at the same time\n [tau_p,tau_m]=Quad(case,alpha,cd,cr,Ks, Kr, n)\n if tau_m>Ts**n or tau_p<Ts**n:\n # cexclusion is stable\n stab_e=1\n # stability in coexistence \n if tau_p<0:\n stab_c=0\n else:\n 
Td=tau_p**(1/n)\n x1d=alpha*Kd*(T0-Td)/(fmax*Td-alpha*(T0-Td))\n x2d=1-x1d-(dmax*Td**n/(Td**n+K**n)+alpha)/r1\n #check the stability condition\n stab=Stab_cond(alpha, T0, Td,x1d,x2d, r1,r2,n, K)\n if stab==0:\n #stable coexistence\n stab_c=1\n #classify\n if stab_e==1 and stab_c==1:\n # two stable equilbria\n fate[5]+=1\n res=5\n elif stab_e==1 and stab_c==0:\n #only stable cexclusion\n fate[2]+=1\n res=2\n elif stab_e==0 and stab_c==1:\n #stable coexistence\n fate[3]+=1\n res=3\n else:\n #both unstable\n fate[4]+=1\n res=4\n \n #save the results\n if counter==0:\n result=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n #save the result with parameter values\n \n else:\n #add array of results\n R=np.array([[Ks, Kr, cr, cd, alpha, T0,res]])\n result=np.concatenate((result, R), axis=0)\n counter+=1\n \n #save csv file and graph\n np.savetxt(fname+'.csv',result, delimiter=',', header='Ks, Kr, cr, cd, alpha, T0, class', fmt='%.6f') \n print(fate)\n fate_matrix[tri,:]=fate \n if case==0: \n np.savetxt('parameter_sweep_MC_total_case0.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4', fmt='%d')\n else:\n np.savetxt('parameter_sweep_MC_total_case1.csv',fate_matrix, delimiter=',', header='cl0,l1,cl2,cl3,cl4,cl5', fmt='%d')\n Plot(case)" ]
[ "0.6784844", "0.6024626", "0.5881136", "0.5734935", "0.5676947", "0.5604572", "0.5486235", "0.5474955", "0.5474955", "0.5469111", "0.5425464", "0.5390522", "0.53749573", "0.5342366", "0.53054893", "0.5284547", "0.5250292", "0.5236084", "0.52332866", "0.521916", "0.5216811", "0.5216433", "0.5174325", "0.5165616", "0.5158835", "0.51307577", "0.5128425", "0.51235425", "0.51034033", "0.50997543", "0.5099656", "0.50962317", "0.50959396", "0.5082655", "0.50519407", "0.5044193", "0.50406355", "0.50388926", "0.50331986", "0.5031843", "0.50305533", "0.50192267", "0.50088704", "0.5005388", "0.49967515", "0.49965817", "0.49963945", "0.49936703", "0.49890196", "0.49837044", "0.4983133", "0.4976584", "0.49753425", "0.49567595", "0.49561808", "0.4946682", "0.49424833", "0.49171925", "0.49049923", "0.48962873", "0.48919886", "0.48877135", "0.48786312", "0.48757362", "0.48692256", "0.48690847", "0.48663822", "0.48649266", "0.48611248", "0.48597443", "0.48494014", "0.48488754", "0.48470923", "0.48429227", "0.4841483", "0.48338458", "0.4831153", "0.48225844", "0.48225844", "0.48205152", "0.48204798", "0.48178408", "0.48151472", "0.48108807", "0.48069382", "0.48054513", "0.47950128", "0.47918823", "0.47880957", "0.4783303", "0.4780616", "0.47764525", "0.4775905", "0.47755557", "0.4769781", "0.47662133", "0.4765587", "0.47649735", "0.47648433", "0.47570947", "0.47570884" ]
0.0
-1
DP to compute the optimal latency, the number of pipeline stages, and the mapping of stages to compute-cluster submeshes.
def inter_op_dp( n_layers: int, n_devices: int, n_microbatches: int, submesh_shapes: List[Tuple[int, int]], intra_compute_costs, max_n_succ_stages, ): min_cost = np.inf best_solution = None prev_intra_cost = 0.0 gap = 1e-6 submesh_sizes: list = NumbaList() for n, m in submesh_shapes: submesh_sizes.append(n * m) for intra_cost in np.sort(np.unique(intra_compute_costs)): if intra_cost - prev_intra_cost < gap: continue if intra_cost * n_microbatches >= min_cost: break # Optimization that lifts a check for stage_cost <= t_max_stage_cost # out of the inner dp loop (see alpa/~/stage_construction.py#L121). # This yields a ~100-200x improvement over the baseline implementation. valid_cost_idxs = np.transpose((intra_compute_costs <= intra_cost).nonzero()) # This corresponds to the i of k <= i <= K from eqn. 3 in the alpa paper. valid_cost_idxs = valid_cost_idxs[ valid_cost_idxs[:, 0] <= valid_cost_idxs[:, 1] ] valid_costs = intra_compute_costs[tuple(valid_cost_idxs.T)] valid_idxs_costs = np.hstack([valid_cost_idxs, valid_costs[:, np.newaxis]]) F, F_stage_max, F_argmin = inter_op_dp_inner_loop( n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages, ) best_n_stages = F[:, 0, n_devices].argmin() all_stages_cost = F[best_n_stages, 0, n_devices] slowest_stage_cost = F_stage_max[best_n_stages, 0, n_devices] if np.isinf(all_stages_cost): continue slowest_stage_total_cost = (n_microbatches - 1) * slowest_stage_cost if all_stages_cost + slowest_stage_total_cost < min_cost: min_cost = all_stages_cost + slowest_stage_total_cost best_solution = best_n_stages, F_argmin prev_intra_cost = intra_cost assert best_solution is not None best_n_stages, F_argmin = best_solution optimal_layer_submesh_assignments = get_optimal_submesh_assignments( best_n_stages, F_argmin, n_devices, n_layers, submesh_sizes ) return optimal_layer_submesh_assignments
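The helper `inter_op_dp_inner_loop` that the code above calls is not shown. Below is a minimal NumPy sketch of the `F[s, k, d]` recurrence such an inner loop would evaluate, under two assumptions that are not confirmed by the code above: each row of `valid_idxs_costs` is `(layer_start, layer_end, submesh_idx, cost)`, and `max_n_succ_stages` is indexed the same way. The function name is hypothetical.

```python
import numpy as np

def inter_op_dp_inner_loop_sketch(n_layers, n_devices, submesh_sizes,
                                  valid_idxs_costs, max_n_succ_stages):
    # F[s, k, d]: minimal total compute cost of slicing layers k..n_layers-1
    # into s pipeline stages using exactly d devices.
    F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf)
    F_stage_max = np.zeros(F.shape)
    # F_argmin[s, k, d] = (next_k, remaining_devices, submesh_idx) of the best split.
    F_argmin = np.full(F.shape + (3,), -1, dtype=np.int64)
    F[0, n_layers, 0] = 0.0

    for s in range(1, n_layers + 1):
        for row in valid_idxs_costs:
            k, i, m = int(row[0]), int(row[1]), int(row[2])
            stage_cost = row[3]
            # Memory constraint: a stage holding layers k..i on submesh m must
            # be able to stash activations for the s - 1 stages after it.
            if max_n_succ_stages[k, i, m] < s - 1:
                continue
            n_s = int(submesh_sizes[m])
            for d in range(n_s, n_devices + 1):
                new_cost = F[s - 1, i + 1, d - n_s] + stage_cost
                if new_cost < F[s, k, d]:
                    F[s, k, d] = new_cost
                    F_stage_max[s, k, d] = max(
                        F_stage_max[s - 1, i + 1, d - n_s], stage_cost)
                    F_argmin[s, k, d] = (i + 1, d - n_s, m)
    return F, F_stage_max, F_argmin
```

Because the enclosing loop over `intra_cost` filters `valid_idxs_costs` to stages no slower than the current candidate maximum, every path recorded in `F_stage_max` is bounded by that candidate, which is what makes the `(n_microbatches - 1) * slowest_stage_cost` term in the outer function a valid pipeline-latency estimate.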
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def N_stages(self):\n return 5", "def calculate_cpu_parameters(self):\n\n # Calculate the cpu parameters, needed for the\n # vpp_startup and grub configuration\n for i in self._nodes.items():\n node = i[1]\n\n # get total number of nic ports\n interfaces = node[\"interfaces\"]\n\n # Make a list of ports by numa node\n ports_per_numa = self._create_ports_per_numa(node, interfaces)\n\n # Get the number of cpus to skip, we never use the first cpu\n other_cpus_start = 1\n other_cpus_end = other_cpus_start + node[\"cpu\"][\"total_other_cpus\"] - 1\n other_workers = None\n if other_cpus_end != 0:\n other_workers = (other_cpus_start, other_cpus_end)\n node[\"cpu\"][\"other_workers\"] = other_workers\n\n # Allocate the VPP main core and workers\n vpp_workers = []\n reserve_vpp_main_core = node[\"cpu\"][\"reserve_vpp_main_core\"]\n total_vpp_cpus = node[\"cpu\"][\"total_vpp_cpus\"]\n total_rx_queues = node[\"cpu\"][\"total_rx_queues\"]\n\n # If total_vpp_cpus is 0 or is less than the numa nodes with ports\n # then we shouldn't get workers\n total_workers_node = 0\n if len(ports_per_numa):\n total_workers_node = total_vpp_cpus // len(ports_per_numa)\n total_main = 0\n if reserve_vpp_main_core:\n total_main = 1\n total_mbufs = 0\n if total_main + total_workers_node != 0:\n for item in ports_per_numa.items():\n numa_node = item[0]\n value = item[1]\n\n # Get the number of descriptors and queues\n mbufs = self._calc_desc_and_queues(\n len(ports_per_numa),\n len(value[\"interfaces\"]),\n total_rx_queues,\n value,\n )\n total_mbufs += mbufs\n\n # Get the VPP workers\n reserve_vpp_main_core = self._calc_vpp_workers(\n node,\n vpp_workers,\n numa_node,\n other_cpus_end,\n total_workers_node,\n reserve_vpp_main_core,\n )\n\n total_mbufs *= 2.5\n total_mbufs = int(total_mbufs)\n else:\n total_mbufs = 0\n\n # Save the info\n node[\"cpu\"][\"vpp_workers\"] = vpp_workers\n node[\"cpu\"][\"total_mbufs\"] = total_mbufs\n\n # Write the config\n self.updateconfig()", "def num_stages(self) -> int:\n return self.pg_mesh.size(self.pipeline_axis)", "def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()", "def cal_topology_feature(self):\n self.NPL()\n self.topo_efficiency_cal()\n self.efficiency_cal()\n self.cluster_cal()\n self.topo_diameter()\n self.spatial_diameter()", "def main_pipeline(self, image):\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m 
in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def on_stage_start(self, stage, epoch):\n if stage != sb.Stage.TRAIN:\n self.cer_metric = self.hparams.cer_computer()\n self.wer_metric = self.hparams.error_rate_computer()", "def search(self, pid, start, layers):\n plan = []\n workload = [0 for _ in range(len(self.workers))]\n\n # each layer is a separate search for the worker to process the layer\n for i in range(len(layers)):\n layer = layers[i]\n target_color = layer[\"color\"]\n target_thickness = layer[\"thickness\"]\n processing_costs = {k: math.ceil(target_thickness / self.processing_rate[k][target_color]) for k in self.processing_rate}\n\n # Searches to find the cost of processing every node at each worker.\n # Cost consists of: Cost of the path \n # + Existing workload cost \n # + processing cost by the worker\n # \n # Basically Dijkstra's.\n visited = set()\n path = {}\n path_costs = {}\n pq = [(0, start)]\n curr_costs = {}\n\n # Assumes single connected component \n while len(visited) != len(self.workers):\n cost, curr = heapq.heappop(pq)\n if curr in visited: continue\n visited.add(curr)\n curr_costs[curr] = cost + processing_costs[self.worker_flavor[curr]] + self.workload[curr]\n if curr == self.origin:\n curr_costs[curr] += self.origin_penalty\n for neighbor in self.neighbors[curr]:\n if neighbor in visited: continue\n cost_new = cost + 1 \n if neighbor == self.origin:\n cost_new += self.origin_penalty\n if neighbor not in path_costs or cost_new < path_costs[neighbor]:\n path_costs[neighbor] = cost_new\n path[neighbor] = curr\n heapq.heappush(pq, (cost_new, neighbor))\n\n # Get the best cost and candidate for processing the current layer\n best_cost = float(\"inf\")\n best_cand = -1\n for cand in curr_costs:\n if curr_costs[cand] < best_cost:\n best_cost = curr_costs[cand]\n best_cand = cand\n\n # If the best candidate isn't the starting node, add the cost of the\n # path for future workload considerations\n if best_cand != start:\n # create the path \n best_path = [best_cand]\n while best_path[-1] != start:\n best_path.append(path[best_path[-1]])\n best_path = best_path[::-1]\n\n # Add the Pass operations to the plan\n prev = start \n for curr in best_path[1:]:\n workload[prev] += 1\n plan.append([1, {\"Pass\":{\"pearl_id\":pid,\"to_worker\":curr}}])\n prev = curr\n\n # Add the noms to the plan \n workload[best_cand] += processing_costs[self.worker_flavor[best_cand]]\n 
plan.append([processing_costs[self.worker_flavor[best_cand]], {\"Nom\": pid}])\n\n # Set the last worker in the path as the start of the next search pass\n start = best_cand\n return plan, workload, start", "def __stage1(self, image, scales: list, stage_status: StageStatus):\r\n total_boxes = np.empty((0, 9))\r\n status = stage_status\r\n\r\n for scale in scales:\r\n scaled_image = self.__scale_image(image, scale)\r\n\r\n img_x = np.expand_dims(scaled_image, 0)\r\n img_y = np.transpose(img_x, (0, 2, 1, 3))\r\n\r\n out = self._pnet.run(img_y)\r\n\r\n out0 = np.transpose(out[0], (0, 2, 1, 3))\r\n out1 = np.transpose(out[1], (0, 2, 1, 3))\r\n\r\n boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(),\r\n out0[0, :, :, :].copy(), scale, self._steps_threshold[0])\r\n\r\n # inter-scale nms\r\n pick = self.__nms(boxes.copy(), 0.5, 'Union')\r\n if boxes.size > 0 and pick.size > 0:\r\n boxes = boxes[pick, :]\r\n total_boxes = np.append(total_boxes, boxes, axis=0)\r\n\r\n numboxes = total_boxes.shape[0]\r\n\r\n if numboxes > 0:\r\n pick = self.__nms(total_boxes.copy(), 0.7, 'Union')\r\n total_boxes = total_boxes[pick, :]\r\n\r\n regw = total_boxes[:, 2] - total_boxes[:, 0]\r\n regh = total_boxes[:, 3] - total_boxes[:, 1]\r\n\r\n qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw\r\n qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh\r\n qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw\r\n qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh\r\n\r\n total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)\r\n status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),\r\n width=stage_status.width, height=stage_status.height)\r\n\r\n return total_boxes, status", "def _get_default_pipeline_params(\n project: str,\n location: str,\n root_dir: str,\n target_column: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: str,\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: Optional[int] = None,\n stage_2_num_parallel_trials: Optional[int] = None,\n stage_2_num_selected_trials: Optional[int] = None,\n data_source_csv_filenames: Optional[str] = None,\n data_source_bigquery_table_path: Optional[str] = None,\n predefined_split_key: Optional[str] = None,\n timestamp_split_key: Optional[str] = None,\n stratified_split_key: Optional[str] = None,\n training_fraction: Optional[float] = None,\n validation_fraction: Optional[float] = None,\n test_fraction: Optional[float] = None,\n weight_column: Optional[float] = None,\n study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,\n optimization_objective_recall_value: Optional[float] = None,\n optimization_objective_precision_value: Optional[float] = None,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: Optional[str] = None,\n stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,\n stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,\n transform_dataflow_machine_type: Optional[str] = None,\n transform_dataflow_max_num_workers: Optional[int] = None,\n transform_dataflow_disk_size_gb: Optional[int] = None,\n dataflow_subnetwork: Optional[str] = None,\n dataflow_use_public_ips: bool = 
True,\n encryption_spec_key_name: Optional[str] = None,\n additional_experiments: Optional[Dict[str, Any]] = None,\n dataflow_service_account: Optional[str] = None,\n max_selected_features: Optional[int] = None,\n apply_feature_selection_tuning: bool = False,\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: Optional[str] = None,\n evaluation_batch_predict_starting_replica_count: Optional[int] = None,\n evaluation_batch_predict_max_replica_count: Optional[int] = None,\n evaluation_batch_explain_machine_type: Optional[str] = None,\n evaluation_batch_explain_starting_replica_count: Optional[int] = None,\n evaluation_batch_explain_max_replica_count: Optional[int] = None,\n evaluation_dataflow_machine_type: Optional[str] = None,\n evaluation_dataflow_starting_num_workers: Optional[int] = None,\n evaluation_dataflow_max_num_workers: Optional[int] = None,\n evaluation_dataflow_disk_size_gb: Optional[int] = None,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: Optional[str] = None,\n distill_batch_predict_starting_replica_count: Optional[int] = None,\n distill_batch_predict_max_replica_count: Optional[int] = None,\n stage_1_tuning_result_artifact_uri: Optional[str] = None,\n quantiles: Optional[List[float]] = None,\n enable_probabilistic_inference: bool = False,\n num_selected_features: Optional[int] = None,\n model_display_name: str = '',\n model_description: str = '',\n) -> Dict[str, Any]:\n if not study_spec_parameters_override:\n study_spec_parameters_override = []\n if not stage_1_tuner_worker_pool_specs_override:\n stage_1_tuner_worker_pool_specs_override = []\n if not cv_trainer_worker_pool_specs_override:\n cv_trainer_worker_pool_specs_override = []\n if not quantiles:\n quantiles = []\n\n parameter_values = {}\n parameters = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column': target_column,\n 'prediction_type': prediction_type,\n 'data_source_csv_filenames': data_source_csv_filenames,\n 'data_source_bigquery_table_path': data_source_bigquery_table_path,\n 'predefined_split_key': predefined_split_key,\n 'timestamp_split_key': timestamp_split_key,\n 'stratified_split_key': stratified_split_key,\n 'training_fraction': training_fraction,\n 'validation_fraction': validation_fraction,\n 'test_fraction': test_fraction,\n 'optimization_objective': optimization_objective,\n 'train_budget_milli_node_hours': train_budget_milli_node_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'weight_column': weight_column,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_parameters_override': study_spec_parameters_override,\n 'stage_1_tuner_worker_pool_specs_override': (\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': (\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'dataflow_service_account': dataflow_service_account,\n 'encryption_spec_key_name': encryption_spec_key_name,\n 'max_selected_features': max_selected_features,\n 'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,\n 
'quantiles': quantiles,\n 'enable_probabilistic_inference': enable_probabilistic_inference,\n 'model_display_name': model_display_name,\n 'model_description': model_description,\n }\n parameter_values.update(\n {param: value for param, value in parameters.items() if value is not None}\n )\n\n if run_evaluation:\n eval_parameters = {\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_batch_explain_machine_type': (\n evaluation_batch_explain_machine_type\n ),\n 'evaluation_batch_explain_starting_replica_count': (\n evaluation_batch_explain_starting_replica_count\n ),\n 'evaluation_batch_explain_max_replica_count': (\n evaluation_batch_explain_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_starting_num_workers': (\n evaluation_dataflow_starting_num_workers\n ),\n 'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in eval_parameters.items()\n if value is not None\n }\n )\n\n # V1 pipeline without FTE\n if num_selected_features is None:\n if not additional_experiments:\n additional_experiments = {}\n\n parameters = {\n 'transformations': transformations,\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 'additional_experiments': additional_experiments,\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n if apply_feature_selection_tuning:\n parameter_values.update({\n 'apply_feature_selection_tuning': apply_feature_selection_tuning,\n })\n\n if run_distillation:\n distillation_parameters = {\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in distillation_parameters.items()\n if value is not None\n }\n )\n\n # V2 pipeline (with FTE)\n else:\n if run_distillation:\n raise ValueError(\n 'Distillation is currently not supported'\n ' when num_selected_features is specified.'\n )\n\n parameters = {\n 'num_selected_features': num_selected_features,\n 'dataset_level_custom_transformation_definitions': [],\n 'dataset_level_transformations': [],\n 'tf_auto_transform_features': {},\n 'tf_custom_transformation_definitions': [],\n 'legacy_transformations_path': transformations,\n 'feature_transform_engine_dataflow_machine_type': (\n transform_dataflow_machine_type\n ),\n 
'feature_transform_engine_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'feature_transform_engine_dataflow_disk_size_gb': (\n transform_dataflow_disk_size_gb\n ),\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n return parameter_values", "def optimization(self, pipeline, max_evals):\n hyperparameter = self.hyperparameter_tunning(pipeline, max_evals)\n self.pipeline_dict['hyperparameter'] = hyperparameter", "def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n occupancy_map.append(current_occupancy_map)\r\n return occupancy_map", "def compute_stats(self, train_loader):\n for i, stage in enumerate(self.stages):\n stage.encoder.spectrogram.compute_stats(train_loader, i)", "def update_dependencies():\r\n\r\n if par['task'] == 'mnist':\r\n par['n_tasks'] = 100\r\n par['input_shape'] = [28, 28]\r\n par['n_input'] = np.product(par['input_shape'])\r\n par['n_output'] = 10\r\n elif par['task'] == 'omniglot':\r\n par['input_shape'] = [26, 26]\r\n par['n_input'] = 256 if par['conv_input'] else np.product(par['input_shape'])\r\n par['n_output'] = par['n_ways'] #par['n_meta_tasks'] + 
par['n_test_tasks']\r\n\r\n par['layer_dims'] = [par['n_input']] + par['hidden_layers'] + [par['n_output']]\r\n\r\n\r\n par['n_layers'] = len(par['layer_dims'])\r\n if par['task'] == 'mnist' or par['task'] == 'imagenet':\r\n par['labels_per_task'] = 10\r\n elif par['task'] == 'cifar':\r\n par['labels_per_task'] = 5", "def compute(self):\n # some possible parameters:\n # minimum size: min node count for a cluster\n # (should be small, or some fraction of total graph size)\n # num splits: desired number of resultant partitions. Partitioning will continue until there are no more \"parallelizing splits\", or the total partition count is >= num splits\n minSplits = 3\n clustermetalist = []\n (roots, inters) = self.rootSplit(self.cluster)\n clustermetalist.append(roots)\n if (len(roots) + len(inters)) < minSplits:\n # split intersects.\n inters = map(self.rootSplit, inters)\n clustermetalist.append(inters[0])\n clustermetalist.append(inters[1])\n else:\n clustermetalist.append(inters)\n \n print \"nodes\", len(self.cluster)\n print \"roots\", len(roots)\n self.ready = clustermetalist\n # The metalist is a list of lists of clusters.\n # list[0] is a list of clusters that are ready for execution.\n # list[1] is a list of clusters that are ready after all clusters\n # in list[0] are complete. Some or all clusters may be ready\n # earlier, but each cluster requires some finite progress in one\n # or more clusters in list[0], otherwise the cluster could be\n # placed in list[0].\n # list[i+1] is related to list[i] similarly as list[1] is related\n # to list[0]\n open(\"pass1.dot\",\"w\").write(self.makeStateGraph(\"pass1\",roots))\n pass", "def get_default_pipeline_and_parameters(\n project: str,\n location: str,\n root_dir: str,\n target_column_name: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: Dict[str, Any],\n split_spec: Dict[str, Any],\n data_source: Dict[str, Any],\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,\n stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,\n stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,\n weight_column_name: str = '',\n study_spec_override: Optional[Dict[str, Any]] = None,\n optimization_objective_recall_value: float = -1,\n optimization_objective_precision_value: float = -1,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',\n stats_and_example_gen_dataflow_max_num_workers: int = 25,\n stats_and_example_gen_dataflow_disk_size_gb: int = 40,\n transform_dataflow_machine_type: str = 'n1-standard-16',\n transform_dataflow_max_num_workers: int = 25,\n transform_dataflow_disk_size_gb: int = 40,\n dataflow_subnetwork: str = '',\n dataflow_use_public_ips: bool = True,\n encryption_spec_key_name: str = '',\n additional_experiments: Optional[Dict[str, Any]] = None,\n dataflow_service_account: str = '',\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: str = _EVALUATION_BATCH_PREDICT_MACHINE_TYPE,\n evaluation_batch_predict_starting_replica_count: int = _EVALUATION_BATCH_PREDICT_STARTING_REPLICA_COUNT,\n evaluation_batch_predict_max_replica_count: int = _EVALUATION_BATCH_PREDICT_MAX_REPLICA_COUNT,\n evaluation_dataflow_machine_type: str = _EVALUATION_DATAFLOW_MACHINE_TYPE,\n 
evaluation_dataflow_max_num_workers: int = _EVALUATION_DATAFLOW_MAX_NUM_WORKERS,\n evaluation_dataflow_disk_size_gb: int = _EVALUATION_DATAFLOW_DISK_SIZE_GB,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: str = 'n1-standard-16',\n distill_batch_predict_starting_replica_count: int = 25,\n distill_batch_predict_max_replica_count: int = 25,\n) -> Tuple[str, Dict[str, Any]]:\n warnings.warn(\n 'This method is deprecated,'\n ' please use get_automl_tabular_pipeline_and_parameters instead.'\n )\n\n if stage_1_num_parallel_trials <= 0:\n stage_1_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS\n\n if stage_2_num_parallel_trials <= 0:\n stage_2_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS\n\n hours = float(train_budget_milli_node_hours) / 1000.0\n multiplier = stage_1_num_parallel_trials * hours / 500.0\n stage_1_single_run_max_secs = int(math.sqrt(multiplier) * 2400.0)\n phase_2_rounds = int(\n math.sqrt(multiplier) * 100 / stage_2_num_parallel_trials + 0.5\n )\n if phase_2_rounds < 1:\n phase_2_rounds = 1\n\n # All of magic number \"1.3\" above is because the trial doesn't always finish\n # in time_per_trial. 1.3 is an empirical safety margin here.\n stage_1_deadline_secs = int(\n hours * 3600.0 - 1.3 * stage_1_single_run_max_secs * phase_2_rounds\n )\n\n if stage_1_deadline_secs < hours * 3600.0 * 0.5:\n stage_1_deadline_secs = int(hours * 3600.0 * 0.5)\n # Phase 1 deadline is the same as phase 2 deadline in this case. Phase 2\n # can't finish in time after the deadline is cut, so adjust the time per\n # trial to meet the deadline.\n stage_1_single_run_max_secs = int(\n stage_1_deadline_secs / (1.3 * phase_2_rounds)\n )\n\n reduce_search_space_mode = 'minimal'\n if multiplier > 2:\n reduce_search_space_mode = 'regular'\n if multiplier > 4:\n reduce_search_space_mode = 'full'\n\n # Stage 2 number of trials is stage_1_num_selected_trials *\n # _NUM_FOLDS, which should be equal to phase_2_rounds *\n # stage_2_num_parallel_trials. 
Use this information to calculate\n # stage_1_num_selected_trials:\n stage_1_num_selected_trials = int(\n phase_2_rounds * stage_2_num_parallel_trials / _NUM_FOLDS\n )\n stage_1_deadline_hours = stage_1_deadline_secs / 3600.0\n\n stage_2_deadline_hours = hours - stage_1_deadline_hours\n stage_2_single_run_max_secs = stage_1_single_run_max_secs\n\n parameter_values = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column_name': target_column_name,\n 'prediction_type': prediction_type,\n 'optimization_objective': optimization_objective,\n 'transformations': input_dictionary_to_parameter(transformations),\n 'split_spec': input_dictionary_to_parameter(split_spec),\n 'data_source': input_dictionary_to_parameter(data_source),\n 'stage_1_deadline_hours': stage_1_deadline_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_1_num_selected_trials': stage_1_num_selected_trials,\n 'stage_1_single_run_max_secs': stage_1_single_run_max_secs,\n 'reduce_search_space_mode': reduce_search_space_mode,\n 'stage_2_deadline_hours': stage_2_deadline_hours,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'stage_2_single_run_max_secs': stage_2_single_run_max_secs,\n 'weight_column_name': weight_column_name,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_override': input_dictionary_to_parameter(study_spec_override),\n 'stage_1_tuner_worker_pool_specs_override': input_dictionary_to_parameter(\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': input_dictionary_to_parameter(\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': transform_dataflow_max_num_workers,\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'encryption_spec_key_name': encryption_spec_key_name,\n }\n if additional_experiments:\n parameter_values.update(\n {\n 'additional_experiments': input_dictionary_to_parameter(\n additional_experiments\n )\n }\n )\n if run_evaluation:\n parameter_values.update({\n 'dataflow_service_account': dataflow_service_account,\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n })\n if run_distillation:\n # All of magic number \"1.3\" above is because the trial doesn't 
always finish\n # in time_per_trial. 1.3 is an empirical safety margin here.\n distill_stage_1_deadline_hours = (\n math.ceil(\n float(_DISTILL_TOTAL_TRIALS)\n / parameter_values['stage_1_num_parallel_trials']\n )\n * parameter_values['stage_1_single_run_max_secs']\n * 1.3\n / 3600.0\n )\n\n parameter_values.update({\n 'distill_stage_1_deadline_hours': distill_stage_1_deadline_hours,\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n })\n pipeline_definition_path = os.path.join(\n pathlib.Path(__file__).parent.resolve(),\n 'deprecated/default_pipeline.json',\n )\n return pipeline_definition_path, parameter_values", "def on_stage_start(self, stage, epoch):\n if stage != sb.Stage.TRAIN:\n self.acc_metric = self.hparams.acc_computer()\n self.bleu_metric = self.hparams.bleu_computer()", "def _monitorPipeline(batchState):\n pl = yield pipelines_client.pipelineList('localhost',\n 'local',\n 'guest',\n batchState['pipeline_name'],\n True)\n pl = pl[0]\n\n\n numTasks = 9\n completedTasks = 6\n for cl, pName in pl['children']:\n try:\n remotePipelines = yield pipelines_client.pipelineList('localhost',\n cl,\n 'guest',\n pName,\n True)\n remotePipeline = remotePipelines[0]\n _log(batchState, 'Loading child pipeline: (%s, %s, %s)' % (cl, pName, remotePipeline['task_name']))\n remoteTask = yield tasks_client.loadTask('localhost',\n cl,\n 'guest',\n remotePipeline['task_name'])\n\n numTasks += remoteTask['numTasks']\n completedTasks += remoteTask['completedTasks']\n except Exception, err:\n _log(batchState, 'Error in monitorPipeline: %s' % str(err))\n\n if pl['children']:\n yield _updateTask(batchState,\n lambda t : t.update(numTasks=numTasks,\n completedTasks=completedTasks))\n\n if batchState['pipeline_state'] == RUNNING_PIPELINE_STATE:\n reactor.callLater(CHILDREN_PIPELINE_REFRESH, _monitorPipeline, batchState)", "def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)", "def su_cost(node_type, node_count, cpu, gpu, ram, duration):\r\n if node_type == 'std':\r\n node_factor = 1\r\n tot_cpu = 28\r\n tot_ram = 128\r\n if node_type == 'gpu':\r\n node_factor = 2\r\n tot_cpu = 28\r\n tot_ram = 256\r\n if node_type == 'fat':\r\n node_factor = 6\r\n tot_cpu = 56\r\n tot_ram = 1024\r\n # job_setup = \"current setup = {} node type + {} number of nodes + {} number of cpu # + {} number of ram + {} hrs duration of job + {} total cpu + {} total ram\".format(node_type, node_count, cpu, ram, duration, tot_cpu, tot_ram)\r\n # calculate service units\r\n max_resource = top_resource(\r\n alloc_CPU = cpu, cpu_denominator = tot_cpu,\r\n alloc_GPU = gpu, gpu_denominator = 4,\r\n alloc_RAM = ram, ram_denominator = tot_ram)\r\n su = ( (node_count * (max_resource * node_factor)) * 28 * duration )\r\n return(su)", "def get_optimal_submesh_assignments(\n best_n_stages, F_argmin, n_devices, n_ops, submesh_sizes\n):\n current_s = best_n_stages\n current_layer = 0\n current_devices = n_devices\n\n optimal_layer_submesh_assignments = 
[]\n while current_s > 0 and current_layer < n_ops and current_devices > 0:\n next_start_layer, submesh_shape_idx, sharding_config_idx = F_argmin[\n current_s, current_layer, current_devices\n ]\n assert next_start_layer != -1 and current_devices != -1\n optimal_layer_submesh_assignments.append(\n ((current_layer, next_start_layer), submesh_shape_idx, sharding_config_idx)\n )\n current_s -= 1\n current_layer = next_start_layer\n current_devices -= submesh_sizes[submesh_shape_idx]\n\n assert current_s == 0 and current_layer == n_ops and current_devices == 0\n\n return optimal_layer_submesh_assignments", "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def get_latency_of_one_partition(\n partition: Partition,\n node_to_latency_mapping: Dict[Node, NodeLatency]\n) -> PartitionLatency:\n\n def get_top_nodes(partition: Partition) -> List[Node]:\n \"\"\"Given a partition, return a list of nodes on the top bfs level\"\"\"\n top_nodes: List[Node] = []\n for node in partition.nodes:\n # Skip placeholder and get_attr nodes\n if node.op in {'placeholder', 'get_attr'}:\n continue\n input_nodes: Dict[Node, None] = {}\n map_arg(node.args, lambda n: input_nodes.setdefault(n))\n map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))\n # If a node has no input nodes in this partition,\n # or its input nodes in this partition are placeholders and get_attrs\n # this node is on the top bfs level in this partition\n if not any([n in partition.nodes and n.op not in {'placeholder', 'get_attr'} for n in input_nodes]):\n top_nodes.append(node)\n return top_nodes\n\n def dfs_helper(node: Node, partition_latency) -> PartitionLatency:\n \"\"\"Given a top node of a partition, this function returns\n the latency of the critical path in the partition\n \"\"\"\n node_latency = node_to_latency_mapping[node]\n # Calculate the current overall latency of the partition\n overall_latency = partition_latency.overall_latency + max(node_latency.compute_latency, node_latency.mem_latency)\n # Update the mem latency of this path\n mem_latency = partition_latency.mem_latency + node_latency.mem_latency\n # Update the compute latency of this path\n compute_latency = partition_latency.compute_latency + node_latency.compute_latency\n # Get all users of this node that are in this partition\n users = set(node.users).intersection(partition.nodes)\n if users:\n max_latency = 
PartitionLatency(mem_latency=0., compute_latency=0., overall_latency=0.)\n for n in users:\n # Get new partition latency recursively\n new_partition_latency = dfs_helper(n, PartitionLatency(mem_latency, compute_latency, overall_latency))\n if new_partition_latency.overall_latency > max_latency.overall_latency:\n max_latency = new_partition_latency\n return max_latency\n # If there is no user, the node is at bottom of the partition\n return PartitionLatency(mem_latency, compute_latency, overall_latency)\n # Main part starts\n # Get all top level nodes of this partition\n top_nodes = get_top_nodes(partition)\n critical_path_latency = PartitionLatency(mem_latency=0., compute_latency=0., overall_latency=0.)\n # Go through all top nodes and find the largest latency (critical pass latency)\n for node in top_nodes:\n partition_latency = dfs_helper(node, PartitionLatency(mem_latency=0., compute_latency=0., overall_latency=0.))\n if partition_latency.overall_latency > critical_path_latency.overall_latency:\n critical_path_latency = partition_latency\n return critical_path_latency", "def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = (height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model", "def resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', 
train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]", "def _parcompute(f, dimension, meshsize,**kwargs):\n verbose = kwargs.get('verbose', False)\n flag_lift_label = kwargs.get('flag_lift_label',False)\n use_weighted_delaunay = kwargs.get('use_weighted_delaunay', False)\n lift_grid_size = kwargs.get('lift_grid_size', 200)\n \n # Initialize ray for parallel computation\n ray.init(ignore_reinit_error=True)\n\n since = time.time()\n \n outdict = {}\n thresh_epsilon = 5e-3\n \n \"\"\" Perform a parallel computation of phase diagram \"\"\"\n # 1. generate grid\n grid = makegridnd(meshsize, dimension)\n outdict['grid'] = grid\n grid_ray = ray.put(grid)\n lap = time.time()\n if verbose:\n print('{}-dimensional grid generated at {:.2f}s'.format(dimension,lap-since))\n \n energy = np.asarray([f(x) for x in grid.T]) \n \n lap = time.time()\n if verbose:\n print('Energy computed at {:.2f}s'.format(lap-since))\n\n outdict['energy'] = energy\n \n lap = time.time()\n if verbose:\n print('Energy is corrected at {:.2f}s'.format(lap-since))\n \n # 3. Compute convex hull\n points = np.concatenate((grid[:-1,:].T,energy.reshape(-1,1)),axis=1) \n simplices, hull,upper_hull = point_at_inifinity_convexhull(points)\n outdict['upper_hull']=upper_hull\n outdict['hull'] = hull \n outdict['simplices'] = simplices\n \n if verbose:\n print('Total of {} simplices in the convex hull'.format(len(simplices)))\n \n lap = time.time()\n if verbose:\n print('{} is computed at {:.2f}s'.format(_method,lap-since))\n\n thresh_scale = kwargs.get('thresh_scale',1.25)\n thresh = thresh_scale*euclidean(grid[:,0],grid[:,1])\n \n if verbose:\n print('Using {:.2E} as a threshold for Laplacian of a simplex'.format(thresh)) \n \n outdict['thresh'] = thresh\n\n lap = time.time()\n if verbose:\n print('Simplices are refined at {:.2f}s'.format(lap-since))\n # 4. for each simplex in the hull compute number of connected components (parallel)\n num_comps_ray = [ray_label_simplex.remote(grid_ray, simplex, thresh) for simplex in simplices]\n num_comps = ray.get(num_comps_ray) \n lap = time.time()\n if verbose:\n print('Simplices are labelled at {:.2f}s'.format(lap-since))\n \n outdict['num_comps'] = num_comps\n \n del num_comps_ray\n outdict['coplanar'] = None\n if flag_lift_label:\n \n # 5. 
lift the labels from simplices to points (parallel)\n if lift_grid_size == meshsize:\n lift_grid_ray = grid_ray\n lift_grid = grid\n else:\n lift_grid = makegridnd(lift_grid_size, dimensions) # we lift labels to a constant mesh \n lift_grid_ray = ray.put(lift_grid)\n \n inside_ray = [ray_lift_label.remote(grid_ray, lift_grid_ray,\n simplex, label) for simplex, label in zip(simplices, num_comps)]\n inside = ray.get(inside_ray)\n \n coplanar = [item[1] for item in inside]\n outdict['coplanar'] = coplanar\n lap = time.time()\n \n if verbose:\n print('Labels are lifted at {:.2f}s'.format(lap-since))\n\n print('Total {}/{} coplanar simplices'.format(Counter(coplanar)[0],len(simplices)))\n\n phase = np.zeros(lift_grid.shape[1])\n for i,label in zip(inside,num_comps):\n if i[1]==1:\n phase[i[0]] = label\n phase = phase.reshape(1,-1)\n output = np.vstack((lift_grid,phase))\n index = ['Phi_'+str(i) for i in range(1, output.shape[0])]\n index.append('label')\n output = pd.DataFrame(data = output,index=index)\n \n del lift_grid_ray, inside_ray, inside\n \n else:\n output = []\n \n outdict['output'] = output \n lap = time.time()\n print('Computation took {:.2f}s'.format(lap-since))\n \n # we remove everything we don't need\n del grid_ray \n \n # finish computation and exit ray\n ray.shutdown()\n\n return outdict", "def standard_optimize_phases():\n return [\n annotate_downstream_side_inputs,\n annotate_stateful_dofns_as_roots,\n fix_side_input_pcoll_coders,\n pack_combiners,\n lift_combiners,\n expand_sdf,\n fix_flatten_coders,\n # sink_flattens,\n greedily_fuse,\n read_to_impulse,\n extract_impulse_stages,\n remove_data_plane_ops,\n sort_stages,\n ]", "def measure_mp_speedup():\n modes = [\n # name, function\n ('dSMC', ana.d_smc),\n ('dAMC', ana.d_amc),\n ('EDF-VD', ana.d_edf_vd),\n ('pSMC', ana.p_smc),\n ('pAMC-BB', ana.p_amc_bb),\n ('pAMC-BB+', ft.partial(ana.p_amc_bb, ignore_hi_mode=True))\n ]\n times_seq = {}\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n start_total_seq = time()\n for name, func in modes:\n start_mode_seq = time()\n rates = []\n for task_sets in task_sets_list:\n results = []\n for task_set in task_sets:\n results.append(func(task_set))\n rates.append(100 * np.average(results))\n stop_mode_seq = time()\n times_seq[name] = stop_mode_seq - start_mode_seq\n stop_total_seq = time()\n times_seq['Overall'] = stop_total_seq - start_total_seq\n\n times_par = {}\n start_total_par = time()\n pool = mp.Pool()\n for name, func in modes:\n start_mode_par = time()\n rates = []\n for task_sets in task_sets_list:\n rates.append(100 * np.average(pool.map(func, task_sets)))\n stop_mode_par = time()\n times_par[name] = stop_mode_par - start_mode_par\n stop_total_par = time()\n times_par['Overall'] = stop_total_par - start_total_par\n\n speedups = {}\n for name, _ in modes:\n speedups[name] = times_seq[name] / times_par[name]\n speedups['Overall'] = times_seq['Overall'] / times_par['Overall']\n\n print(\"PERFORMANCE MEASUREMENTS\")\n print(\"Number of cores: %d\" % mp.cpu_count())\n print(\"Scheme: Sequential time / Parallel time / Speedup\")\n for name, _ in modes:\n print(\"%s: %.3fs / %.3fs / %.3f\" % (name, times_seq[name], times_par[name], speedups[name]))\n print(\"Overall: %.3fs / %.3fs / %.3f\" % (times_seq['Overall'], times_par['Overall'], speedups['Overall']))", "def n_delay_stages(self):\n return self._n_delay_stages", "def n_delay_stages(self):\n return self._n_delay_stages", "def determine_jobs_per_pool(numpools, totaljobs):\n cluster = 
os.environ['CC_CLUSTER']\n if cluster in ['graham', 'beluga']:\n jobs_per_pool = math.floor(totaljobs / numpools)\n else:\n jobs_per_pool = totaljobs\n return jobs_per_pool", "def _benchmark_cnn(self):\n self.single_session = False\n (image_producer_ops, enqueue_ops, fetches) = self._build_model()\n fetches_list = nest.flatten(list(fetches.values()))\n main_fetch_group = tf.group(*fetches_list)\n execution_barrier = None\n \n\n global_step = tf.train.get_global_step()\n with tf.device(self.global_step_device):\n with tf.control_dependencies([main_fetch_group]):\n fetches['inc_global_step'] = global_step.assign_add(1)\n\n\n local_var_init_op = tf.local_variables_initializer()\n variable_mgr_init_ops = [local_var_init_op]\n with tf.control_dependencies([local_var_init_op]):\n variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())\n local_var_init_op_group = tf.group(*variable_mgr_init_ops)\n\n summary_op = tf.summary.merge_all()\n is_chief = (not self.job_name or self.task_index == 0)\n summary_writer = None\n \n # We run the summaries in the same thread as the training operations by\n # passing in None for summary_op to avoid a summary_thread being started.\n # Running summaries and training operations in parallel could run out of\n # GPU memory.\n saver = tf.train.Saver(\n self.variable_mgr.savable_variables(), save_relative_paths=True)\n ready_for_local_init_op = None\n \n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=self.params.train_dir,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_var_init_op_group,\n saver=saver,\n global_step=global_step,\n summary_op=None,\n save_model_secs=self.params.save_model_secs,\n summary_writer=summary_writer)\n\n step_train_times = []\n start_standard_services = (\n self.params.summary_verbosity >= 1 or\n self.dataset.queue_runner_required())\n target = self.cluster_manager.get_target() if self.cluster_manager else ''\n with sv.managed_session(\n master=target,\n config=create_config_proto(self.params),\n start_standard_services=start_standard_services) as sess:\n image_producer = cnn_util.ImageProducer(sess, image_producer_ops,\n self.batch_group_size)\n image_producer.start()\n for i in xrange(len(enqueue_ops)):\n sess.run(enqueue_ops[:(i + 1)])\n image_producer.notify_image_consumption()\n self.init_global_step, = sess.run([global_step])\n if not self.single_session:\n global_step_watcher = GlobalStepWatcher(\n sess, global_step,\n self.num_workers * self.num_warmup_batches +\n self.init_global_step,\n self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)\n global_step_watcher.start()\n \n\n log_fn('Running warm up')\n local_step = -1 * self.num_warmup_batches\n done_fn = global_step_watcher.done\n loop_start_time = time.time()\n while not done_fn():\n if local_step == 0:\n log_fn('Done warm up')\n \n header_str = 'Step\\tImg/sec\\tloss'\n \n log_fn(header_str)\n \n # reset times to ignore warm up batch\n step_train_times = []\n loop_start_time = time.time()\n \n fetch_summary = None\n summary_str = benchmark_one_step(\n sess, fetches, local_step,\n self.batch_size * (self.num_workers if self.single_session else 1),\n step_train_times, self.trace_filename, image_producer, self.params,\n fetch_summary)\n \n local_step += 1\n loop_end_time = time.time()\n # Waits for the global step to be done, regardless of done_fn.\n \n num_steps = global_step_watcher.num_steps()\n elapsed_time = global_step_watcher.elapsed_time()\n\n average_wall_time = elapsed_time / num_steps if num_steps > 0 else 0\n 
images_per_sec = ((self.num_workers * self.batch_size) / average_wall_time\n if average_wall_time > 0 else 0)\n\n log_fn('-' * 64)\n log_fn('total images/sec: %.2f' % images_per_sec)\n log_fn('-' * 64)\n image_producer.done()\n #if is_chief:\n # store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)\n # Save the model checkpoint.\n \n sv.stop()\n return {\n 'num_workers': self.num_workers,\n 'num_steps': num_steps,\n 'average_wall_time': average_wall_time,\n 'images_per_sec': images_per_sec\n }", "def _weigh_object(self, host_state, weight_properties):\n return 1.0 * host_state.vcpus_total / max(host_state.vcpus_used, 1)", "def expand_gbk(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n transform = only_transform(stage.transforms)\n if transform.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:\n for pcoll_id in transform.inputs.values():\n pipeline_context.length_prefix_pcoll_coders(pcoll_id)\n for pcoll_id in transform.outputs.values():\n if pipeline_context.use_state_iterables:\n pipeline_context.components.pcollections[\n pcoll_id].coder_id = pipeline_context.with_state_iterables(\n pipeline_context.components.pcollections[pcoll_id].coder_id)\n pipeline_context.length_prefix_pcoll_coders(pcoll_id)\n\n # This is used later to correlate the read and write.\n transform_id = stage.name\n if transform != pipeline_context.components.transforms.get(transform_id):\n transform_id = unique_name(\n pipeline_context.components.transforms, stage.name)\n pipeline_context.components.transforms[transform_id].CopyFrom(transform)\n grouping_buffer = create_buffer_id(transform_id, kind='group')\n gbk_write = Stage(\n transform.unique_name + '/Write',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name + '/Write',\n inputs=transform.inputs,\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_OUTPUT_URN,\n payload=grouping_buffer))\n ],\n downstream_side_inputs=frozenset(),\n must_follow=stage.must_follow)\n yield gbk_write\n\n yield Stage(\n transform.unique_name + '/Read',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name + '/Read',\n outputs=transform.outputs,\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_INPUT_URN,\n payload=grouping_buffer))\n ],\n downstream_side_inputs=stage.downstream_side_inputs,\n must_follow=union(frozenset([gbk_write]), stage.must_follow))\n else:\n yield stage", "def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step 
instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' 
% (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )", "def main():\n # Handle CLI.\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--molecule\", type=str, default=\"H2\", help=\"the \"\n \"UCCSD molecule to perform HPO on\")\n parser.add_argument(\"--slice-index\", type=int, default=-1, help=\"the \"\n \"slice to perform HPO on, do not specify to run HPO \"\n \"on the full circuit\")\n parser.add_argument(\"--core-count\", type=int, default=1, help=\"the \"\n \"number of cpu cores this run may use\")\n args = vars(parser.parse_args())\n molecule = args[\"molecule\"]\n slice_index = args[\"slice_index\"]\n core_count = args[\"core_count\"]\n\n # Generate the state object that encapsulates the optimization for the circuit.\n state = ProcessState(molecule, slice_index)\n\n # Redirect everything the central process puts out to a log file.\n # By default, ray redirects the stdout of each worker process\n # to the central process.\n log_file = state.file_name + \".log\"\n log_file_path = os.path.join(state.data_path, log_file)\n with open(log_file_path, \"a+\") as log:\n sys.stdout = sys.stderr = log\n\n # Display run characteristics.\n print(\"PID={}\\nWALL_TIME={}\\nSLICE_INDEX={}\\nPULSE_TIME={}\\n\"\n \"(LR_LB, LR_UB)=({}, {})\\n(DECAY_LB, DECAY_UB)=({}, {})\\n\"\n \"CORE_COUNT={}\\n{}\"\n \"\".format(os.getpid(), time.time(), state.slice_index,\n state.pulse_time, LR_LB, LR_UB, DECAY_LB, DECAY_UB, \n core_count, state.circuit))\n\n # Define the search space on the parameters: learning rate and\n # learning rate decay.\n space = {\n \"lr\": hp.loguniform(\"lr\", np.log(LR_LB), np.log(LR_UB)),\n \"decay\": hp.uniform(\"decay\", DECAY_LB, DECAY_UB),\n }\n \n # We want to minimize QOC error/loss, i.e. 
we want to maximize\n # negative loss.\n algo = ray.tune.suggest.HyperOptSearch(space, max_concurrent=core_count,\n reward_attr=\"neg_loss\")\n run_config = {\n \"num_samples\": HPO_MAX_ITERATIONS,\n \"name\": state.file_name,\n \"loggers\": [ray.tune.logger.NoopLogger],\n \"search_alg\": algo,\n \"verbose\": 1,\n \"local_dir\": state.data_path,\n \"resume\": True,\n }\n \n # Ray cannot serialize python objects in its object store,\n # so we have to pass the state in a lambda wrapper.\n objective_wrapper = lambda config, reporter: objective(state, config,\n reporter)\n \n # Start ray and run HPO.\n ray.init(num_cpus=core_count, object_store_memory=OBJECT_STORE_MEMORY,\n redis_max_memory=REDIS_MAX_MEMORY)\n ray.tune.register_trainable(\"lambda_id\", objective_wrapper)\n ray.tune.run(\"lambda_id\", **run_config)", "def preparePipelines(self):\n\n # Construct the differnent states making up the pipeline\n\n # Input assembly state describes how primitives are assembled\n # This pipeline will assemble vertex data as a triangle lists (though we only use one triangle)\n inputAssemblyState = vk.VkPipelineInputAssemblyStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,\n topology = vk.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST\n )\n # Rasterization state\n rasterizationState = vk.VkPipelineRasterizationStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,\n polygonMode = vk.VK_POLYGON_MODE_FILL,\n cullMode = vk.VK_CULL_MODE_NONE,\n frontFace = vk.VK_FRONT_FACE_COUNTER_CLOCKWISE,\n depthClampEnable = vk.VK_FALSE,\n rasterizerDiscardEnable = vk.VK_FALSE,\n depthBiasEnable = vk.VK_FALSE,\n lineWidth = 1.0\n )\n # Color blend state describes how blend factors are calculated (if used)\n # We need one blend attachment state per color attachment (even if blending is not used\n blendAttachmentState = vk.VkPipelineColorBlendAttachmentState(\n colorWriteMask = 0xf,\n blendEnable = vk.VK_FALSE\n )\n colorBlendState = vk.VkPipelineColorBlendStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,\n attachmentCount = 1,\n pAttachments = [blendAttachmentState]\n )\n # Viewport state sets the number of viewports and scissor used in this pipeline\n # Note: This is actually overriden by the dynamic states (see below)\n viewportState = vk.VkPipelineViewportStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,\n viewportCount = 1,\n scissorCount = 1\n )\n # Enable dynamic states\n # Most states are baked into the pipeline, but there are still a few dynamic states that can be changed within a command buffer\n #To be able to change these we need do specify which dynamic states will be changed using this pipeline. 
Their actual states are set later on in the command buffer.\n # For this example we will set the viewport and scissor using dynamic states\n dynamicStateEnables = [vk.VK_DYNAMIC_STATE_VIEWPORT, vk.VK_DYNAMIC_STATE_SCISSOR]\n dynamicState = vk.VkPipelineDynamicStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,\n dynamicStateCount = len(dynamicStateEnables),\n pDynamicStates = dynamicStateEnables\n )\n\n # Depth and stencil state containing depth and stencil compare and test operations\n # We only use depth tests and want depth tests and writes to be enabled and compare with less or equal\n opState = vk.VkStencilOpState(\n failOp = vk.VK_STENCIL_OP_KEEP,\n passOp = vk.VK_STENCIL_OP_KEEP,\n compareOp = vk.VK_COMPARE_OP_ALWAYS\n )\n depthStencilState = vk.VkPipelineDepthStencilStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,\n depthTestEnable = vk.VK_TRUE,\n depthWriteEnable = vk.VK_TRUE,\n depthCompareOp = vk.VK_COMPARE_OP_LESS_OR_EQUAL,\n depthBoundsTestEnable = vk.VK_FALSE,\n stencilTestEnable = vk.VK_FALSE,\n front = opState,\n back = opState\n )\n # Multi sampling state\n # This example does not make use fo multi sampling (for anti-aliasing), the state must still be set and passed to the pipeline\n multisampleState = vk.VkPipelineMultisampleStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,\n rasterizationSamples = vk.VK_SAMPLE_COUNT_1_BIT,\n pSampleMask = None\n )\n # Vertex input descriptions\n # Specifies the vertex input parameters for a pipeline\n #Vertex input binding\n # This example uses a single vertex input binding at binding point 0 (see vkCmdBindVertexBuffers)\n vertexInputBinding = vk.VkVertexInputBindingDescription(\n binding = 0,\n stride = self.vertexShape.size * self.vertexShape.itemsize,\n inputRate = vk.VK_VERTEX_INPUT_RATE_VERTEX\n )\n # Input attribute bindings describe shader attribute locations and memory layouts\n vertexInputAttributs = []\n # These match the following shader layout (see triangle.vert):\n # layout (location = 0) in vec3 inPos;\n # layout (location = 1) in vec3 inColor;\n # Attribute location 0: Position\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 0,\n # Position attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = 0 # offsetof(vertexShape, position)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n vertexInputAttribut = vk.VkVertexInputAttributeDescription(\n binding = 0,\n location = 1,\n # Color attribute is three 32 bit signed (SFLOAT) floats (R32 G32 B32)\n format = vk.VK_FORMAT_R32G32B32_SFLOAT,\n offset = self.vertexShape[0].size * self.vertexShape.itemsize # offsetof(vertexShape, color)\n )\n vertexInputAttributs.append(vertexInputAttribut)\n\n # Vertex input state used for pipeline creation\n vertexInputState = vk.VkPipelineVertexInputStateCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,\n vertexBindingDescriptionCount = 1,\n pVertexBindingDescriptions = [vertexInputBinding],\n vertexAttributeDescriptionCount = len(vertexInputAttributs),\n pVertexAttributeDescriptions = vertexInputAttributs\n )\n # Shaders\n shaderStages = []\n # Vertex shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_VERTEX_BIT,\n # Load binary SPIR-V shader\n module = 
vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.vert.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n # Fragment shader\n shaderStage = vk.VkPipelineShaderStageCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n # Set pipeline stage for this shader\n stage = vk.VK_SHADER_STAGE_FRAGMENT_BIT,\n # Load binary SPIR-V shader\n module = vks.vulkantools.loadShader(self.getAssetPath() + \"shaders/triangle/triangle.frag.spv\", self.device),\n pName = \"main\"\n )\n shaderStages.append(shaderStage)\n\n # Assign the pipeline states to the pipeline creation info structure\n pipelineCreateInfo = vk.VkGraphicsPipelineCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,\n # The layout used for this pipeline (can be shared among multiple pipelines using the same layout)\n layout = self.pipelineLayout,\n # Renderpass this pipeline is attached to\n renderPass = self.renderPass,\n pVertexInputState = vertexInputState,\n pInputAssemblyState = inputAssemblyState,\n pRasterizationState = rasterizationState,\n pColorBlendState = colorBlendState,\n pMultisampleState = multisampleState,\n pViewportState = viewportState,\n pDepthStencilState = depthStencilState,\n pDynamicState = dynamicState,\n stageCount = len(shaderStages),\n pStages = shaderStages\n )\n # Create rendering pipeline using the specified states\n self.pipelines = vk.vkCreateGraphicsPipelines(self.device, self.pipelineCache, 1, [pipelineCreateInfo], None)\n try:\n self.pipeline = self.pipelines[0]\n except TypeError:\n self.pipeline = self.pipelines\n # Shader modules are no longer needed once the graphics pipeline has been created\n vk.vkDestroyShaderModule(self.device, shaderStages[0].module, None)\n vk.vkDestroyShaderModule(self.device, shaderStages[1].module, None)", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def updateEdgePipelineLevel(self):\n for e_list in self.s2e.values():\n for e in e_list:\n slot_path = []\n src_slot = self.v2s[e.src]\n dst_slot = self.v2s[e.dst]\n slot_path = self.e_name2path[e.name]\n\n # 2 levels of pipelining for each slot crossing\n if src_slot == dst_slot:\n e.pipeline_level = 0\n else:\n e.pipeline_level = (len(slot_path) + 1) * 2", "def compute_network_load_and_bottleneck_workload(env: ControlledRandomWalk) \\\n -> Tuple[np.ndarray, np.ndarray, np.ndarray, List]:\n xi = cvx.Variable((env.num_buffers, 1))\n nu = cvx.Variable((env.num_resources, 1))\n objective = 
cvx.Maximize(xi.T * env.job_generator.demand_rate)\n constraints = [\n -env.job_generator.buffer_processing_matrix.T * xi - env.constituency_matrix.T * nu\n <= np.zeros((env.num_activities, 1)),\n np.ones((env.num_resources, 1)).T * nu <= 1,\n nu >= np.zeros((env.num_resources, 1))\n ]\n prob = cvx.Problem(objective, constraints)\n network_load = prob.solve(solver=cvx.CPLEX)\n return network_load, xi, nu, constraints", "def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // 
n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##", "def _serialcompute(f, dimension, meshsize,**kwargs):\n verbose = kwargs.get('verbose', False)\n lower_hull_method 
= kwargs.get('lower_hull_method', None)\n flag_lift_label = kwargs.get('flag_lift_label',False)\n lift_grid_size = kwargs.get('lift_grid_size', meshsize) \n since = time.time()\n \n outdict = defaultdict(list)\n \n \"\"\" Perform a parallel computation of phase diagram \"\"\"\n # 1. generate grid\n grid = makegridnd(meshsize, dimension)\n outdict['grid'] = grid\n \n lap = time.time()\n if verbose:\n print('{}-dimensional grid generated at {:.2f}s'.format(dimension,lap-since))\n\n energy = np.asarray([f(x) for x in grid.T])\n\n lap = time.time()\n if verbose:\n print('Energy computed at {:.2f}s'.format(lap-since))\n \n max_energy = np.max(energy)\n \n if lower_hull_method is None:\n pad_energy = kwargs.get('pad_energy',2)\n doctor_points = np.asarray([is_boundary_point(x) for x in grid.T])\n energy[doctor_points] = pad_energy*max_energy\n \n if verbose:\n print('Aplpying {:d}x padding of {:.2f} maximum energy'.format(pad_energy, max_energy))\n \n outdict['energy'] = energy\n \n lap = time.time()\n if verbose:\n print('Energy is corrected at {:.2f}s'.format(lap-since))\n points = np.concatenate((grid[:-1,:].T,energy.reshape(-1,1)),axis=1) \n \n if lower_hull_method is None: \n hull = ConvexHull(points)\n upper_hull = np.asarray([is_upper_hull(grid,simplex) for simplex in hull.simplices])\n simplices = hull.simplices[~upper_hull]\n elif lower_hull_method=='point_at_infinity':\n simplices, hull,upper_hull = point_at_inifinity_convexhull(points)\n elif lower_hull_method=='negative_znorm':\n simplices, hull,upper_hull = negative_znorm_convexhull(points)\n \n outdict['upper_hull']=upper_hull\n outdict['hull'] = hull\n \n lap = time.time()\n if verbose:\n print('Simplices are computed and refined at {:.2f}s'.format(lap-since))\n \n outdict['simplices'] = simplices\n if verbose:\n print('Total of {} simplices in the convex hull'.format(len(simplices)))\n\n thresh_scale = kwargs.get('thresh_scale',1.25)\n thresh = thresh_scale*euclidean(grid[:,0],grid[:,1])\n \n if verbose:\n print('Using {:.2E} as a threshold for Laplacian of a simplex'.format(thresh)) \n \n outdict['thresh'] = thresh\n \n # 4. 
for each simplex in the hull compute number of connected components (parallel)\n num_comps = [label_simplex(grid, simplex, thresh) for simplex in simplices]\n lap = time.time()\n if verbose:\n print('Simplices are labelled at {:.2f}s'.format(lap-since))\n outdict['num_comps'] = num_comps\n outdict['coplanar'] = None\n \n if flag_lift_label:\n if lift_grid_size == meshsize:\n lift_grid = grid\n else:\n lift_grid = makegridnd(lift_grid_size, dimensions) # we lift labels to a constant mesh \n \n inside = [lift_label(grid, lift_grid, simplex, label) for simplex, label in zip(simplices, num_comps)]\n \n coplanar = [item[1] for item in inside]\n outdict['coplanar']=np.asarray(coplanar)\n lap = time.time()\n if verbose:\n print('Labels are lifted at {:.2f}s'.format(lap-since))\n\n print('Total {}/{} coplanar simplices'.format(np.sum(coplanar),len(simplices)))\n\n phase = np.zeros(lift_grid.shape[1])\n for i,label in zip(inside,num_comps):\n if not i[1]:\n phase[i[0]] = label\n phase = phase.reshape(1,-1)\n output = np.vstack((lift_grid,phase))\n index = ['Phi_'+str(i) for i in range(1, output.shape[0])]\n index.append('label')\n output = pd.DataFrame(data = output,index=index)\n \n else:\n output = []\n \n outdict['output'] = output \n \n lap = time.time()\n if verbose:\n print('Computation took {:.2f}s'.format(lap-since))\n \n return outdict", "def __init__(self, num_cycles_index1=None, num_cycles_index2=None, num_cycles_read1=None, num_cycles_read2=None, num_lanes=None, num_reads=None, num_surfaces=None, num_swaths_per_lane=None, num_tiles_per_swath=None, error_rate=None, error_rate_r1=None, error_rate_r2=None, intensity_cycle1=None, is_indexed=None, max_cycle_called=None, max_cycle_extracted=None, max_cycle_scored=None, min_cycle_called=None, min_cycle_extracted=None, min_cycle_scored=None, non_indexed_error_rate=None, non_indexed_intensity_cycle1=None, non_indexed_percent_aligned=None, non_indexed_percent_gt_q30=None, non_indexed_projected_total_yield=None, non_indexed_yield_total=None, percent_aligned=None, percent_gt_q30=None, percent_gt_q30_last10_cycles=None, percent_gt_q30_r1=None, percent_gt_q30_r2=None, percent_pf=None, percent_resynthesis=None, phasing_r1=None, phasing_r2=None, pre_phasing_r1=None, pre_phasing_r2=None, projected_total_yield=None, reads_pf_total=None, reads_total=None, yield_total=None, clusters=None, clusters_pf=None, cluster_density=None, occupancy=None, percent_loading_concentration=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._num_cycles_index1 = None\n self._num_cycles_index2 = None\n self._num_cycles_read1 = None\n self._num_cycles_read2 = None\n self._num_lanes = None\n self._num_reads = None\n self._num_surfaces = None\n self._num_swaths_per_lane = None\n self._num_tiles_per_swath = None\n self._error_rate = None\n self._error_rate_r1 = None\n self._error_rate_r2 = None\n self._intensity_cycle1 = None\n self._is_indexed = None\n self._max_cycle_called = None\n self._max_cycle_extracted = None\n self._max_cycle_scored = None\n self._min_cycle_called = None\n self._min_cycle_extracted = None\n self._min_cycle_scored = None\n self._non_indexed_error_rate = None\n self._non_indexed_intensity_cycle1 = None\n self._non_indexed_percent_aligned = None\n self._non_indexed_percent_gt_q30 = None\n self._non_indexed_projected_total_yield = None\n self._non_indexed_yield_total = None\n 
self._percent_aligned = None\n self._percent_gt_q30 = None\n self._percent_gt_q30_last10_cycles = None\n self._percent_gt_q30_r1 = None\n self._percent_gt_q30_r2 = None\n self._percent_pf = None\n self._percent_resynthesis = None\n self._phasing_r1 = None\n self._phasing_r2 = None\n self._pre_phasing_r1 = None\n self._pre_phasing_r2 = None\n self._projected_total_yield = None\n self._reads_pf_total = None\n self._reads_total = None\n self._yield_total = None\n self._clusters = None\n self._clusters_pf = None\n self._cluster_density = None\n self._occupancy = None\n self._percent_loading_concentration = None\n self.discriminator = None\n\n self.num_cycles_index1 = num_cycles_index1\n self.num_cycles_index2 = num_cycles_index2\n self.num_cycles_read1 = num_cycles_read1\n self.num_cycles_read2 = num_cycles_read2\n self.num_lanes = num_lanes\n self.num_reads = num_reads\n self.num_surfaces = num_surfaces\n self.num_swaths_per_lane = num_swaths_per_lane\n self.num_tiles_per_swath = num_tiles_per_swath\n if error_rate is not None:\n self.error_rate = error_rate\n if error_rate_r1 is not None:\n self.error_rate_r1 = error_rate_r1\n if error_rate_r2 is not None:\n self.error_rate_r2 = error_rate_r2\n if intensity_cycle1 is not None:\n self.intensity_cycle1 = intensity_cycle1\n if is_indexed is not None:\n self.is_indexed = is_indexed\n if max_cycle_called is not None:\n self.max_cycle_called = max_cycle_called\n if max_cycle_extracted is not None:\n self.max_cycle_extracted = max_cycle_extracted\n if max_cycle_scored is not None:\n self.max_cycle_scored = max_cycle_scored\n if min_cycle_called is not None:\n self.min_cycle_called = min_cycle_called\n if min_cycle_extracted is not None:\n self.min_cycle_extracted = min_cycle_extracted\n if min_cycle_scored is not None:\n self.min_cycle_scored = min_cycle_scored\n if non_indexed_error_rate is not None:\n self.non_indexed_error_rate = non_indexed_error_rate\n if non_indexed_intensity_cycle1 is not None:\n self.non_indexed_intensity_cycle1 = non_indexed_intensity_cycle1\n if non_indexed_percent_aligned is not None:\n self.non_indexed_percent_aligned = non_indexed_percent_aligned\n if non_indexed_percent_gt_q30 is not None:\n self.non_indexed_percent_gt_q30 = non_indexed_percent_gt_q30\n if non_indexed_projected_total_yield is not None:\n self.non_indexed_projected_total_yield = non_indexed_projected_total_yield\n if non_indexed_yield_total is not None:\n self.non_indexed_yield_total = non_indexed_yield_total\n if percent_aligned is not None:\n self.percent_aligned = percent_aligned\n if percent_gt_q30 is not None:\n self.percent_gt_q30 = percent_gt_q30\n if percent_gt_q30_last10_cycles is not None:\n self.percent_gt_q30_last10_cycles = percent_gt_q30_last10_cycles\n if percent_gt_q30_r1 is not None:\n self.percent_gt_q30_r1 = percent_gt_q30_r1\n if percent_gt_q30_r2 is not None:\n self.percent_gt_q30_r2 = percent_gt_q30_r2\n if percent_pf is not None:\n self.percent_pf = percent_pf\n if percent_resynthesis is not None:\n self.percent_resynthesis = percent_resynthesis\n if phasing_r1 is not None:\n self.phasing_r1 = phasing_r1\n if phasing_r2 is not None:\n self.phasing_r2 = phasing_r2\n if pre_phasing_r1 is not None:\n self.pre_phasing_r1 = pre_phasing_r1\n if pre_phasing_r2 is not None:\n self.pre_phasing_r2 = pre_phasing_r2\n if projected_total_yield is not None:\n self.projected_total_yield = projected_total_yield\n if reads_pf_total is not None:\n self.reads_pf_total = reads_pf_total\n if reads_total is not None:\n self.reads_total = reads_total\n 
if yield_total is not None:\n self.yield_total = yield_total\n if clusters is not None:\n self.clusters = clusters\n if clusters_pf is not None:\n self.clusters_pf = clusters_pf\n if cluster_density is not None:\n self.cluster_density = cluster_density\n if occupancy is not None:\n self.occupancy = occupancy\n if percent_loading_concentration is not None:\n self.percent_loading_concentration = percent_loading_concentration", "def on_stage_start(self, stage, epoch=None):\n\n # Set up statistics trackers for this stage\n\n # Set up evaluation-only statistics trackers\n if stage != sb.Stage.TRAIN:\n self.error_metrics = self.hparams.error_stats()\n if stage == sb.Stage.VALID:\n label_encoder = sb.dataio.encoder.CategoricalEncoder()\n\n lab_enc_file = os.path.join(self.hparams.save_folder, \"label_encoder.txt\")\n\n label_encoder.load(path=lab_enc_file)\n\n self.bona_index = label_encoder.encode_label('bonafide')\n self.spoof_index = label_encoder.encode_label('spoof')\n\n self.pd_out = {'files': [], 'scores': []}", "def __stage2(self, img, total_boxes, stage_status: StageStatus):\r\n\r\n num_boxes = total_boxes.shape[0]\r\n if num_boxes == 0:\r\n return total_boxes, stage_status\r\n\r\n # second stage\r\n tempimg = np.zeros(shape=(24, 24, 3, num_boxes))\r\n\r\n for k in range(0, num_boxes):\r\n tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))\r\n\r\n tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \\\r\n img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :]\r\n\r\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\r\n tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)\r\n\r\n else:\r\n return np.empty(shape=(0,)), stage_status\r\n\r\n tempimg = (tempimg - 127.5) * 0.0078125\r\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\r\n\r\n out = self._rnet.run(tempimg1)\r\n\r\n out0 = np.transpose(out[0])\r\n out1 = np.transpose(out[1])\r\n\r\n score = out1[1, :]\r\n\r\n ipass = np.where(score > self._steps_threshold[1])\r\n\r\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\r\n\r\n mv = out0[:, ipass[0]]\r\n\r\n if total_boxes.shape[0] > 0:\r\n pick = self.__nms(total_boxes, 0.7, 'Union')\r\n total_boxes = total_boxes[pick, :]\r\n total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))\r\n total_boxes = self.__rerec(total_boxes.copy())\r\n\r\n return total_boxes, stage_status", "def __init__(self, is_master, track_processes, write_profile,\n verbose_cluster_stats):\n my_ip = appscale_info.get_private_ip()\n lb_ips = appscale_info.get_load_balancer_ips()\n\n self._is_lb = my_ip in lb_ips\n if is_master is not None:\n self._is_master = is_master\n else:\n self._is_master = my_ip == appscale_info.get_headnode_ip()\n self._track_processes = track_processes\n self._write_profile = write_profile\n\n # There are 3 kinds of local stats (node/processes/proxies)\n self._local_node_stats = LocalStats(\n cache_size=NODE_STATS_CACHE_SIZE,\n update_interval=UPDATE_NODE_STATS_INTERVAL)\n self._local_processes_stats = LocalStats(\n cache_size=PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROCESSES_STATS_INTERVAL)\n self._local_proxies_stats = LocalStats(\n cache_size=PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_PROXIES_STATS_INTERVAL)\n\n if self._is_master:\n # And 3 same kinds of cluster stats\n self._cluster_nodes_stats = ClusterStats(\n 
cache_size=CLUSTER_NODES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_NODES_STATS_INTERVAL)\n self._cluster_processes_stats = ClusterStats(\n cache_size=CLUSTER_PROCESSES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROCESSES_STATS_INTERVAL)\n self._cluster_proxies_stats = ClusterStats(\n cache_size=CLUSTER_PROXIES_STATS_CACHE_SIZE,\n update_interval=UPDATE_CLUSTER_PROXIES_STATS_INTERVAL)\n\n if not verbose_cluster_stats:\n # To reduce slave-to-master traffic and verbosity of cluster stats\n # you can select which fields of stats to collect on master\n self._cluster_nodes_stats.included_field_lists = {\n 'node': ['cpu', 'memory', 'partitions_dict', 'loadavg'],\n 'node.cpu': ['percent', 'count'],\n 'node.memory': ['available'],\n 'node.partition': ['free', 'used'],\n 'node.loadavg': ['last_5min'],\n }\n self._cluster_processes_stats.included_field_lists = {\n 'process': ['monit_name', 'unified_service_name', 'application_id',\n 'port', 'cpu', 'memory', 'children_stats_sum'],\n 'process.cpu': ['user', 'system', 'percent'],\n 'process.memory': ['resident', 'virtual', 'unique'],\n 'process.children_stats_sum': ['cpu', 'memory'],\n }\n self._cluster_proxies_stats.included_field_lists = {\n 'proxy': ['name', 'unified_service_name', 'application_id',\n 'frontend', 'backend'],\n 'proxy.frontend': ['scur', 'smax', 'rate', 'req_rate', 'req_tot'],\n 'proxy.backend': ['qcur', 'scur', 'hrsp_5xx', 'qtime', 'rtime'],\n }\n\n # All routes (handlers will be assigned during configuration)\n self._routes = {\n '/stats/local/node/cache': None,\n '/stats/local/node/current': None,\n '/stats/local/processes/cache': None,\n '/stats/local/processes/current': None,\n '/stats/local/proxies/cache': None,\n '/stats/local/proxies/current': None,\n '/stats/cluster/nodes': None,\n '/stats/cluster/processes': None,\n '/stats/cluster/proxies': None,\n }\n self._publishers = []", "def _compute_network_layout(self):\n self._layer_sizes = np.round(np.linspace(100, 1, self._num_layers)). 
\\\n astype(np.int64).tolist()", "def main():\n target = 'Coding:Level1'\n output_root = f'problem_5_output/{target.replace(\":\", \"_\")}'\n if not os.path.exists(output_root):\n os.makedirs(output_root, exist_ok=True)\n\n # dictionary of parameter grids, one for each process\n param_grids = {\n 'early_stopping': ParameterGrid([\n {\n 'patience': [15], # , 20, 40]\n },\n ]),\n 'fit': ParameterGrid([\n {\n 'batch_size': [128], # , 64, 128, 256],\n 'epochs': [16], # 20, 50],\n },\n ]),\n 'model_preprocessor': ParameterGrid([\n {\n 'num_unique_words': [5000], # 4000, 1000, 6000, 10000],\n 'max_sequence_length': [150], # 50, 75, 100, 125, 150, 200],\n },\n ]),\n 'model': ParameterGrid([\n # {\n # Dense single hidden layer model hyperparameters:\n # 'name': ['dense_h1'],\n # 'embedded_dims': [8], # , 16, 32, 64, 128, 256],\n # 'num_units_h1': [8], # , 16, 32, 64, 128, 256],\n # 'drop_h1': [None], # , 0.1, 0.2, 0.25, 0.5, 0.75],\n # 'optimizer': ['nadam', 'adam'],\n # 'learning_rate': [None], # , 0.01, 0.001],\n # 'activation': ['relu', 'tanh'],\n # },\n # {\n # Dense double hidden layer model hyperparameters:\n # 'name': ['dense_h2'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [128],\n # 'num_units_h2': [128],\n # 'drop_h1': [None],\n # 'drop_h2': [0.5],\n # 'optimizer': ['nadam'],\n # 'activation': ['relu'],\n # 'learning_rate': [0.01],\n # },\n # {\n # CNN single hidden layer model hyperparameters\n # 'name': ['conv_h1'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [32], # , 64, 256],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'drop_embed': [0.2], # , 0.5],\n # 'activation': ['relu', 'tanh'],\n # 'optimizer': ['adam', 'nadam']\n # },\n # {\n # CNN double hidden layer model hyperparameters\n # 'name': ['conv_h2'],\n # 'embedded_dims': [128], # , 64, 32, 16, 8],\n # 'num_units_h1': [32], # , 64, 128],\n # 'drop_h2': [0.5], # , 0.75, 0.25, 0.1],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'num_units_h2': [128], # , 64, 32, 16, 8],\n # 'drop_embed': [0.2], # , 0.50],\n # 'activation': ['relu'],\n # 'optimizer': ['adam'], # , 'nadam'],\n # },\n # {\n # CNN double hidden layer model hyperparameters\n # 'name': ['conv_h2.1'],\n # 'embedded_dims': [64],\n # 'num_units_h1': [32], # , 64, 128],\n # 'k_conv_h1': [2], # , 3, 4],\n # 'drop_embed': [0.2], # , 0.5],\n # 'activation': ['relu'],\n # 'optimizer': ['adam'], # , 'nadam']\n # },\n # {\n # RNN single hidden layer model hyperparameters\n # 'name': ['rnn_h1'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'num_units_h1': [128],\n # 'optimizer': ['nadam'],\n # 'learning_rate': [0.01]\n # },\n # {\n # # LSTM double hidden layer (second layer dense FC) model hyperparameters\n # 'name': ['lstm_h1'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'drop_h1': [0.5],\n # 'num_units_h1': [128],\n # 'optimizer': ['nadam'],\n # },\n # {\n # LSTM double hidden layer (second layer dense FC) model hyperparameters\n # 'name': ['lstm_h2'],\n # 'embedded_dims': [64],\n # 'drop_embed': [0.2],\n # 'num_units_h1': [128],\n # 'drop_h1': [0.5],\n # 'num_units_h2': [128],\n # 'optimizer': ['nadam'],\n # 'activation': ['relu']\n # },\n # {\n # # Bi-directional LSTM single hidden layer model hyperparameters\n # 'name': ['bi_lstm_h1'],\n # 'embedded_dims': [32], # , 64, 128],\n # 'drop_embed': [0.2], # , 0.25, 0.5],\n # 'num_units_h1': [32], # , 64, 128],\n # 'drop_h1': [0.2], # , 0.25, 0.5],\n # 'optimizer': ['nadam', 'adam']\n # },\n # {\n # Bi-directional LSTM double hidden layer (second layer Bi-LSTM) model hyperparameters\n # 'name': ['bi_lstm_h2'],\n # 
'embedded_dims': [32], # , 64, 128],\n # 'num_units_h1': [32], # , 64, 128],\n # 'num_units_h2': [32], # , 64, 128],\n # 'drop_h1': [0.25, 0.5],\n # 'drop_h2': [0.25, 0.5],\n # 'optimizer': ['nadam', 'adam']\n # },\n {\n # Multi Convolutional model hyperparameters\n 'name': ['multi_conv_h3_s2'],\n 'drop_embed': [0.5], # , 0.3],\n 'embedded_dims': [128], # , 64, 128, 256],\n 'num_units_h1': [128], # , 64, 128, 256],\n 'num_units_h2': [128], # , 64, 128, 256],\n 'num_units_h3': [128], # , 64, 128, 256],\n 'num_units_h4': [128], # , 64, 128, 256],\n 'k_conv_h1': [3],\n 'k_conv_h2': [2],\n 'activation': ['relu'], # , 'tanh'],\n 'drop_h3': [0.2], # , 0.2, 0.25, 0.5, 0.75],\n 'optimizer': ['adam'], # 'nadam']\n },\n # {\n # # Multi Convolutional model hyperparameters\n # 'name': ['multi_conv_h3_s3'],\n # 'drop_embed': [0.5], # , 0.3],\n # 'embedded_dims': [32], # , 64, 128, 256],\n # 'num_units_h1': [32], # , 64, 128, 256],\n # 'num_units_h2': [32], # , 64, 128, 256],\n # 'num_units_h3': [32], # , 64, 128, 256],\n # 'num_units_h4': [32], # , 64, 128, 256],\n # 'k_conv_h1': [3],\n # 'k_conv_h2': [2],\n # 'k_conv_h3': [4],\n # 'k_conv_h4': [4],\n # 'activation': ['relu', 'tanh'],\n # 'drop_h4': [0.1], # , 0.2, 0.25, 0.5, 0.75],\n # 'optimizer': ['adam', 'nadam']\n # },\n ]),\n 'preprocessor': ParameterGrid([\n # {\n # 'do_clean': [False],\n # 'pad_type': ['pre', 'post'],\n # 'trunc_type': ['pre', 'post'],\n # },\n {\n 'do_clean': [True],\n 'pad_type': ['post'], # , 'post'],\n 'trunc_type': ['post'], # 'post'],\n 'omit_stopwords': [False],\n 'ignore_urls': [False],\n 'fix_contractions': [True],\n 'stem': [True],\n 'remove_foreign_characters': [False], # , False],\n 'lower': [True], # , False],\n 'remove_punctuation': [True], # , False],\n 'bigrams': [True], # , False]\n },\n ])\n }\n\n def prod(a):\n if len(a) == 0:\n return 1\n return a[0] * prod(a[1:])\n\n num_models = prod([len(pg) for pg in param_grids.values()])\n\n param_grid_names = sorted(list(param_grids.keys()))\n param_grid_list = [param_grids[k] for k in param_grid_names]\n\n all_params, best_params = assemble_results(output_root)\n\n if CHECK_ONLY:\n for i, params in enumerate(itertools.product(*param_grid_list[3:5])):\n params = {k: v for k, v in zip(param_grid_names[3:5], params)}\n print(i, params)\n Preprocessor(**params['preprocessor'], **params['model_preprocessor'])\n\n for i, params in enumerate(itertools.product(*param_grid_list[2:4])):\n params = {k: v for k, v in zip(param_grid_names[2:4], params)}\n print(i, params)\n build_fn(num_classes=3, **params['model'], **params['model_preprocessor'])\n return\n\n for i, params in enumerate(itertools.product(*param_grid_list)):\n mem = psutil.virtual_memory()\n percent_used = 1 - mem.available / mem.total\n print(f'{percent_used:.2%} memory used')\n if percent_used > 0.80:\n # exit failure\n print('Exiting (-1)')\n exit(-1)\n\n params = {k: v for k, v in zip(param_grid_names, params)}\n print(f'\\n{i + 1}/{num_models}: {params}\\n')\n\n if params in all_params:\n # skip this one because we already ran it.\n continue\n\n if best_params is not None:\n # print best performance so far\n print(f'best params: {best_params}')\n print(f'best val loss: {best_params[\"results\"][\"valid\"][\"loss\"]:.6f}')\n print(f'best val acc: {best_params[\"results\"][\"valid\"][\"accuracy\"]:.4%}')\n\n # create a new output directory with path to model file.\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H.%M.%S.%f\")\n output_dir = os.path.join(output_root, date)\n if not 
os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_file = os.path.join(output_dir, 'model.h5')\n\n # get the preprocessed training and validation data\n preprocess_time = time.time()\n classes, data_sets, set_names = get_xy(Preprocessor(**params['preprocessor'], **params['model_preprocessor']),\n target=target)\n ((x_train, y_train), (x_valid, y_valid)) = data_sets\n preprocess_time -= time.time()\n\n # build and compile model\n model = build_fn(num_classes=len(classes), **params['model'], **params['model_preprocessor'])\n\n # setup callbacks\n early_stopping = EarlyStopping(monitor='val_loss', verbose=1, **params['early_stopping'])\n model_checkpoint = ModelCheckpoint(\n filepath=model_file,\n save_weights_only=False, save_freq='epoch',\n save_best_only=True, monitor='val_loss', verbose=1)\n callbacks = [early_stopping, model_checkpoint]\n\n # Use sample weights to treat classes equally in loss and accuracy.\n sample_weight = get_sample_weight(y_train)\n sample_weight_valid = get_sample_weight(y_valid)\n\n # fit the model\n train_time = time.time()\n model.fit(x=x_train, y=y_train, sample_weight=sample_weight, verbose=1,\n validation_data=(x_valid, y_valid, sample_weight_valid), callbacks=callbacks, **params['fit'])\n train_time -= time.time()\n\n # load the best model (last one saved)\n model = load_model(model_file, compile=True)\n\n # compute results\n results = get_performance(model, data_sets, set_names)\n results['time'] = {'train': train_time, 'preprocess': preprocess_time}\n\n print(pd.DataFrame(data=results).T)\n params['results'] = results\n\n # save params and results\n with open(os.path.join(output_dir, 'params.json'), 'w') as fp:\n json.dump(params, fp)\n\n # save a copy of *this* Python file.\n shutil.copyfile(__file__, os.path.join(output_dir, 'roatan.py'))\n\n # for convenience, show the validation loss and accuracy in a file name in the same directory.\n result_file_name = f'{params[\"results\"][\"valid\"][\"loss\"]:.6f}_{params[\"results\"][\"valid\"][\"accuracy\"]:.4f}.out'\n with open(os.path.join(output_dir, result_file_name), 'w'):\n pass\n\n # check_model(output_dir)\n\n if best_params is None or (params['results']['valid']['loss'] < best_params['results']['valid']['loss']):\n best_params = params\n\n # assemble results from all runs into one CSV file in output root.\n assemble_results(output_root)", "def main() -> co.Parallel:\n actors = [\"Oprah Winfrey\", \"Kate Mara\", \"Don Cheadle\", \"Dwayne Johnson\"]\n root = co.Parallel(image=_get_image())\n for actor in actors:\n root[actor] = co.Lazy(\n f\"python pipeline.py all_by_actor '{actor}'\"\n )\n return root", "def setupCPUSwept(solver):\n timeSlice = slice(0,solver.sharedShape[0],1)\n solver.blocks = [(timeSlice,)+tuple(block) for block in solver.blocks]\n solver.edgeblocks = makeEdgeBlocksSwept(solver.blocks,solver.arrayShape,solver.blocksize)\n solver.cpu.set_globals(*solver.globals)\n #Creating sets for cpu calculation\n up_sets = createUpPyramidSets(solver.blocksize,solver.operating)\n down_sets = createDownPyramidSets(solver.blocksize,solver.operating)\n oct_sets = down_sets+up_sets\n y_sets,x_sets = createBridgeSets(solver.blocksize,solver.operating,solver.maxPyramidSize)\n cshape = solver.sharedArray[solver.blocks[0]].shape if solver.blocks else (0,)\n #Initializing CPU portion of Geometry\n solver.Up.initializeCPU(solver.cpu,up_sets,solver.intermediate-1,cshape) \n solver.Down.initializeCPU(solver.cpu,down_sets,solver.intermediate-1,cshape)\n 
solver.Xb.initializeCPU(solver.cpu,x_sets,solver.intermediate-1,cshape)\n solver.Yb.initializeCPU(solver.cpu,y_sets,solver.intermediate-1,cshape)\n solver.Oct.initializeCPU(solver.cpu,oct_sets,solver.intermediate-1,cshape)", "def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = 10 # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # ##print(p, map_size, num_feat, tmp)\n return tmp", "def build_pipeline():\n full_df = pd.read_csv(\"../data/healthcare-dataset-stroke-data.csv\",index_col = \"id\").drop(columns = [\"stroke\"],axis=1)\n #transform functions to make the pipeline work\n one_hot_encode_transformed = FunctionTransformer(one_hot_encode)\n impute_transformed = FunctionTransformer(impute)\n add_bodytype_transformed = FunctionTransformer(add_bodytype)\n add_diabetes_transformed = FunctionTransformer(add_diabetes)\n add_preexisting_transformed = FunctionTransformer(add_preexisting)\n add_missing_cols_transformed = FunctionTransformer(add_missing_cols,kw_args={\"total_tags\":get_all_tags(full_df)})\n pipeline = Pipeline([\n\n \n (\"add_bodytype\",add_bodytype_transformed),\n (\"add_diabetes\",add_diabetes_transformed),\n (\"add_preexisting\",add_preexisting_transformed),\n (\"impute\",impute_transformed),\n (\"one_hot_encode\",one_hot_encode_transformed),\n (\"add_missing_cols\",add_missing_cols_transformed),\n #use all available threads\n (\"over_under\" , SMOTEENN()),\n (\"pred\",XGBClassifier(nthread = -1,verbosity = 0,tree_method = 'gpu_hist',eval_metric = \"aucpr\",sampling_method = \"gradient_based\"))\n ])\n \n #set up parameters to test\n parameters = {\n\n 'pred__scale_pos_weight' : list(range(1,60,5)),\n 'over_under__sampling_strategy' : ['auto',0.1,0.2,0.3,0.4,0.5],\n \"pred__max_delta_step\": list(range(0,11))\n \n } \n \n grid = GridSearchCV(pipeline, param_grid=parameters,n_jobs = -1 ,scoring =\"average_precision\",verbose = 1)\n\n return grid", "def __inst_total_cost(self, cur_inst_pc, cur_inst_stage):\n\t\ttotal_cost = 0\n\n\t\tcur_inst_metadata = self.instruction_list[\\\n\t\t\tcur_inst_pc // self.WORD_SIZE]\n\n\t\tcur_inst_func_unit = cur_inst_metadata[\"functional_unit\"]\n\n\t\tif cur_inst_stage == \"execution\":\n\t\t\ttotal_cost += self.functional_units[cur_inst_func_unit][\"clock_cycles\"]\n\t\t\tif \"additional_cost\" in cur_inst_metadata:\n\t\t\t\ttotal_cost += cur_inst_metadata[\"additional_cost\"]\n\n\t\tif cur_inst_stage in self.stage_delay:\n\t\t\ttotal_cost += self.stage_delay[cur_inst_stage]\n\n\t\ttotal_cost += 
self.inst_status[cur_inst_pc]\\\n\t\t\t[self.PIPELINE_STAGES[self.PIPELINE_STAGES.\\\n\t\t\t\tindex(cur_inst_stage) - 1]]\n\n\t\treturn total_cost", "def number_of_workers():\n return (cpu_count() * 2) + 1", "def visualize_mapper_stages(data, y=None, lens=None, cover=None, graph=None, dG=None, **kwargs):\n\n #### Setup\n if y is None and hasattr(data, \"y\"):\n y = data.y.copy()\n elif y is None and hasattr(data, \"target\"):\n y = data.target.copy()\n\n try:\n G = dG.G_.copy()\n except:\n from .graph_utils import process_graph\n G = process_graph(graph, meta=y)\n \n if not hasattr(G, 'node'):\n G.node = dict(G.nodes(data=True))\n\n \n # member color cmap\n cmap = kwargs.get('cmap') \n if hasattr(data, \"cmap\"):\n cmap = data.cmap\n else:\n cmap = \"nipy_spectral_r\"\n cmap = cmap if callable(cmap) else plt.get_cmap(cmap)\n\n # member color norm\n norm = kwargs.get('norm') \n if hasattr(data, \"norm\"):\n norm = data.norm\n else:\n norm = mpl.colors.Normalize(y.min(), y.max())\n \n # member_color \n c = cmap(norm(y))\n c_hex = np.array([mpl.colors.to_hex(_) for _ in c])\n\n # node color, size\n node_size = kwargs.get('node_size')\n if node_size is None:\n node_scale_by = kwargs.get('node_scale_by', 5000)\n node_size = [node_scale_by*(len(_) / len(y)) for n,_ in G.nodes(data='members')]\n node_color = [Counter(c_hex[_]).most_common()[0][0] for n,_ in G.nodes(data='members')]\n\n\n # edge color, size\n edge_size = kwargs.get('edge_size')\n if edge_size is None:\n edge_scale_by = kwargs.get('edge_scale_by', 0.5)\n edge_size = [edge_scale_by*_ for u,v,_ in G.edges(data='size')]\n edge_color = kwargs.get('edge_color')\n if edge_color is None:\n edge_sources = [G.node[u].get('members') for u,v in G.edges()]\n edge_targets = [G.node[v].get('members') for u,v in G.edges()]\n edge_color = [Counter(c_hex[s + t]).most_common()[0][0] for s,t in zip(edge_sources, edge_targets)]\n\n\n # layout (for last stage only)\n layout = kwargs.get('layout', None)\n pos = kwargs.get('pos', None)\n if layout is None:\n pos = \"inverse\"\n\n # init figure, subplots\n figsize = kwargs.get('figsize', (20,4))\n fig, axes = plt.subplots(1, 4, figsize=figsize)\n\n \n #### Draw\n # ensure the lens is 2D\n lens2D = lens.copy()\n if len(lens.T) < 2:\n lens2D = np.c_[np.zeros_like(lens) + lens.mean(), lens] \n elif len(lens.T) > 2:\n lens2D = lens2D[:, :2]\n\n # 1. draw lens (axes: 1-3)\n for ax in axes[:3]:\n ax.scatter(*lens2D.T, c=c, s=np.max(node_size)*.05)\n \n # adjust xlim if 1D\n if len(lens.T) < 2:\n ax.set_xlim(lens.min(), lens.max())\n\n # 2. draw cover (axes: 2)\n draw_cover(ax=axes[1], graph=graph, lens=lens2D, cover=cover)\n\n # 3. draw clusters (axes: 3)\n draw_networkx(G, lens=lens2D, pos=\"inverse\", layout=None, \n node_color=node_color, node_size=node_size, \n edge_color=edge_color, width=edge_size, \n alpha=0.5, edges=False, ax=axes[2])\n\n # 4. 
draw graph (axes: 4)\n draw_networkx(G, lens=lens2D, pos=pos, layout=layout, \n node_color=node_color, node_size=node_size, \n edge_color=edge_color, width=edge_size, \n alpha=1.0, ax=axes[3])\n if layout is None:\n axes[3].set_xlim(axes[2].get_xlim())\n axes[3].set_ylim(axes[2].get_ylim())\n axes[3].axis('off')\n\n\n #### Finish\n for ax in axes:\n\n # despine, based on number of dimensions\n if len(lens.T) > 1:\n despine(ax, spines=['top','right'])\n else:\n despine(ax, spines=['top', 'right', 'bottom', 'left'])\n\n # tight layout\n ax.set_aspect('equal')\n fig.tight_layout(w_pad=2.0)\n\n return fig, axes", "def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return", "def map(\n f: typing.Callable,\n stage: Stage = pypeln_utils.UNDEFINED,\n workers: int = None,\n maxsize: int = None,\n timeout: float = 0,\n on_start: typing.Callable = None,\n on_done: typing.Callable = None,\n) -> Stage:\n\n if pypeln_utils.is_undefined(stage):\n return pypeln_utils.Partial(\n lambda stage: map(\n f,\n stage=stage,\n workers=workers,\n maxsize=maxsize,\n timeout=timeout,\n on_start=on_start,\n on_done=on_done,\n )\n )\n\n stage = to_stage(stage)\n\n return Map(\n f=f, on_start=on_start, on_done=on_done, timeout=timeout, dependencies=[stage],\n )", "def __init__(self, *args, **kwargs):\n if args:\n parameters = args[0]\n else:\n parameters = kwargs\n self.parameters = parameters\n self.number_of_nodes = self.parameters['Number_of_nodes']\n self.detecting_deadlock = self.parameters['detect_deadlock']\n self.digraph = nx.DiGraph()\n self.lmbda = [self.parameters['Arrival_rates']['Class ' + str(i)] for i in range(self.parameters['Number_of_classes'])]\n self.overall_lmbda = sum([sum(self.lmbda[i]) for i in range(len(self.lmbda))])\n self.mu = [self.parameters['Service_rates']['Class ' + str(i)] for i in range(self.parameters['Number_of_classes'])]\n self.c = self.parameters['Number_of_servers']\n self.schedules = [False for i in range(len(self.c))]\n for i in range(len(self.c)):\n if type(self.c[i])==type('string') and self.c[i]!='Inf':\n self.schedules[i] = True \n self.queue_capacities = self.parameters['Queue_capacities']\n self.transition_matrix = [self.parameters['Transition_matrices']['Class ' + str(i)] for i in range(self.parameters['Number_of_classes'])]\n if 'Class_change_matrices' in self.parameters:\n self.class_change_matrix = [self.parameters['Class_change_matrices']['Node ' + str(i)] for i in range(self.parameters['Number_of_nodes'])]\n else:\n self.class_change_matrix = 'NA'\n self.max_simulation_time = self.parameters['Simulation_time']\n self.transitive_nodes = [Node(i + 1, 
self) for i in range(len(self.c))]\n self.nodes = [ArrivalNode(self)] + self.transitive_nodes + [ExitNode(\"Inf\")]\n self.service_times = self.find_service_time_dictionary()\n self.state = [[0, 0] for i in range(self.number_of_nodes)]\n initial_state = [[0, 0] for i in range(self.number_of_nodes)]\n self.times_dictionary = {tuple(tuple(initial_state[i]) for i in range(self.number_of_nodes)): 0.0}\n\n if len(self.lmbda) != len(self.mu) or len(self.lmbda) != len(self.transition_matrix) or len(self.mu) != len(self.transition_matrix):\n raise ValueError('Lambda, Mu and the Transition Matrix should all have the same number of classes')\n\n if any(len(lmbdacls) != len(self.c) for lmbdacls in self.lmbda):\n raise ValueError('Lambda should have same length as c for every class')\n\n if any(len(mucls) != len(self.c) for mucls in self.mu):\n raise ValueError('Mu should have same length as c for every class')\n\n if any(len(transmatrxcls) != len(self.c) for transmatrxcls in self.transition_matrix):\n raise ValueError('Transition matrix should be square matrix of length c for every class')\n\n if any(len(transmatrxrow) != len(self.c) for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls):\n raise ValueError('Transition matrix should be square matrix of length c for every class')\n\n if any(l < 0 for lmbdaclass in self.lmbda for l in lmbdaclass):\n raise ValueError('All arrival rates should be positive')\n\n if any(tmval < 0 for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls for tmval in transmatrxrow) or any(tmval > 1 for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls for tmval in transmatrxrow) or any(sum(transmatrxrow) > 1 for transmatrxcls in self.transition_matrix for transmatrxrow in transmatrxcls):\n raise ValueError('All transition matrix entries should be probabilities 0<=p<=1 and all transition matrix rows should sum to 1 or less')\n\n if self.max_simulation_time < 0:\n raise ValueError('Maximum simulation time should be positive')", "def on_stage_start(self, stage, epoch=None):\n\n # Set up statistics trackers for this stage\n self.loss_metric = sb.utils.metric_stats.MetricStats(\n metric=sb.nnet.losses.nll_loss\n )\n\n # Set up evaluation-only statistics trackers\n if stage != sb.Stage.TRAIN:\n self.error_metrics = self.hparams.error_stats()", "def greedily_fuse(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> FrozenSet[Stage]\n producers_by_pcoll = {} # type: Dict[str, Stage]\n consumers_by_pcoll = collections.defaultdict(\n list) # type: DefaultDict[str, List[Stage]]\n\n # Used to always reference the correct stage as the producer and\n # consumer maps are not updated when stages are fused away.\n replacements = {} # type: Dict[Stage, Stage]\n\n def replacement(s):\n old_ss = []\n while s in replacements:\n old_ss.append(s)\n s = replacements[s]\n for old_s in old_ss[:-1]:\n replacements[old_s] = s\n return s\n\n def fuse(producer, consumer):\n fused = producer.fuse(consumer, pipeline_context)\n replacements[producer] = fused\n replacements[consumer] = fused\n\n # First record the producers and consumers of each PCollection.\n for stage in stages:\n for transform in stage.transforms:\n for input in transform.inputs.values():\n consumers_by_pcoll[input].append(stage)\n for output in transform.outputs.values():\n producers_by_pcoll[output] = stage\n\n # Now try to fuse away all pcollections.\n for pcoll, producer in producers_by_pcoll.items():\n write_pcoll = None\n for consumer in 
consumers_by_pcoll[pcoll]:\n producer = replacement(producer)\n consumer = replacement(consumer)\n # Update consumer.must_follow set, as it's used in can_fuse.\n consumer.must_follow = frozenset(\n replacement(s) for s in consumer.must_follow)\n if producer.can_fuse(consumer, pipeline_context):\n fuse(producer, consumer)\n else:\n # If we can't fuse, do a read + write.\n pipeline_context.length_prefix_pcoll_coders(pcoll)\n buffer_id = create_buffer_id(pcoll)\n if write_pcoll is None:\n write_pcoll = Stage(\n pcoll + '/Write',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=pcoll + '/Write',\n inputs={'in': pcoll},\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_OUTPUT_URN,\n payload=buffer_id))\n ],\n downstream_side_inputs=producer.downstream_side_inputs)\n fuse(producer, write_pcoll)\n if consumer.has_as_main_input(pcoll):\n read_pcoll = Stage(\n pcoll + '/Read',\n [\n beam_runner_api_pb2.PTransform(\n unique_name=pcoll + '/Read',\n outputs={'out': pcoll},\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_INPUT_URN,\n payload=buffer_id))\n ],\n downstream_side_inputs=consumer.downstream_side_inputs,\n must_follow=frozenset([write_pcoll]))\n fuse(read_pcoll, consumer)\n else:\n consumer.must_follow = union(\n consumer.must_follow, frozenset([write_pcoll]))\n\n # Everything that was originally a stage or a replacement, but wasn't\n # replaced, should be in the final graph.\n final_stages = frozenset(stages).union(list(replacements.values()))\\\n .difference(list(replacements))\n\n for stage in final_stages:\n # Update all references to their final values before throwing\n # the replacement data away.\n stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)\n # Two reads of the same stage may have been fused. 
This is unneeded.\n stage.deduplicate_read()\n return final_stages", "def rep(self):\n origin, pipeline_builder = self._get_origin(self.origin)\n destination, pipeline_builder = self._get_destination(self.destination, pipeline_builder)\n pipeline_builder.add_error_stage('Discard')\n\n if self.number_of_processors == 4:\n stream_selector, pipeline_builder = self._get_stream_selector(pipeline_builder)\n expression_evaluator, pipeline_builder = self._get_expression_evaluator(pipeline_builder)\n field_type_converter, pipeline_builder = self._get_field_type_converter(pipeline_builder)\n schema_generator, pipeline_builder = self._get_schema_generator(pipeline_builder)\n trash, pipeline_builder = self._get_destination('Trash', pipeline_builder)\n\n origin >> stream_selector\n stream_selector >> trash\n stream_selector >> expression_evaluator >> field_type_converter >> schema_generator >> destination\n\n stream_selector.condition = [{'outputLane': stream_selector.output_lanes[0],\n 'predicate': '${record:attribute(\"sourceId\") == \"DOESNOTEXIST\"}'},\n {'outputLane': stream_selector.output_lanes[1],\n 'predicate': 'default'}]\n else:\n origin >> destination\n\n for environment in self.environments.values():\n if environment is not None:\n pipeline = pipeline_builder.build().configure_for_environment(environment)\n\n results = self.sdc_executor.benchmark_pipeline(pipeline,\n record_count=self.record_count,\n runs=self.runs)\n\n results['generated_date'] = str(datetime.now())\n results['sdc_version'] = self.sdc_builder.version\n results['origin'] = self.origin\n results['destination'] = self.destination\n results['record_count'] = self.record_count\n results['threads'] = self.number_of_threads\n results['dataset'] = DATASETS[self.dataset]['label']\n results['batch_size'] = self.batch_size\n results['destination_data_format'] = self.destination_format\n results['processor_count'] = self.number_of_processors\n results['cpu_count'] = len(psutil.Process().cpu_affinity())\n results['memory_gb'] = round(psutil.virtual_memory().total / (1000 ** 3))\n try:\n results['instance_type'] = urlopen('http://169.254.169.254/latest/meta-data/instance-type').read().decode('utf-8')\n except (HTTPError, URLError):\n results['instance_type'] = 'unknown'\n\n results['origin_system'] = self.origin_system\n results['destination_system'] = self.destination_system\n\n # Remove outliers\n if len(results['runs']) > 1:\n results['runs'] = [x for x in results['runs'] if -STD_DEV_THRESHOLD < (x - results['throughput_mean']) / results['throughput_std_dev'] < STD_DEV_THRESHOLD]\n results['throughput_mean'] = statistics.mean(results['runs'])\n\n with open(f\"results/{results['pipeline_id']}.json\", \"w\") as file:\n json.dump(results, file)\n\n # Cleanup\n if self.destination == 'Kafka Producer':\n admin_client = KafkaAdminClient(bootstrap_servers=self.environments['cluster'].kafka.brokers, request_timeout_ms=180000)\n admin_client.delete_topics([self.destination_kafka_topic])\n\n if self.destination == 'JDBC Producer':\n self.destination_table.drop(self.environments['database'].engine)\n\n if self.origin == 'HTTP Client':\n self.http_mock.delete_mock()", "def main():\n feature_extraction_model = \"HOG\"\n dimension_reduction_model = \"PCA\"\n k_value = get_input_k(\"k\")\n K_value = get_input_k(\"K\")\n folder = get_input_folder(\"Folder\")\n dim_k_value = 40\n\n query_images = get_input_image_list(folder)\n start = time.time()\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value, 
folder_metadata=folder,\n metadata_collection=\"labelled\")\n obj_feat = dim_red.get_object_feature_matrix()\n features_list = np.array(obj_feat['featureVector'].tolist())\n images_list = np.array(obj_feat['imageId'])\n cos_sim = cosine_similarity(features_list)\n\n sim_graph = sim_graph_from_sim_max(cos_sim, images_list, k_value)\n results = ppr(sim_graph, images_list, query_images)\n results = results[:K_value]\n\n print(\"Top {} images from Personalized page Rank are:\".format(K_value))\n for r in results:\n r[\"path\"] = os.path.abspath(os.path.join(folder, r['imageId']))\n print(r)\n\n query_images_list = [os.path.abspath(os.path.join(folder, img)) for img in query_images]\n title = {\"Model\": \"Personalized Page Rank\", \"k\": k_value, \"K\": K_value}\n show_images_ppr(query_images_list, title, results)\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def optimize_pipe_size(self, network_frame, lb_diam, peak_consumption, max_capacity={}):\n\n # Start data initialization\n GraphParam = self.get_params(network_frame, peak_consumption)\n GraphParam['LB_DIAM'] = lb_diam\n\n # In case of old network\n if len(max_capacity) > 0:\n GraphParam['MAX_CAPACITY'] = {}\n for e, val in max_capacity.items():\n GraphParam['MAX_CAPACITY'][(e[0],e[1])] = val\n else:\n GraphParam['MAX_CAPACITY'] = {}\n \n # Start the algorithm\n # Use NLP module\n optimizer_directory = os.path.dirname(os.path.realpath(__file__))\n with JuliaQgisInterface() as j:\n j.include(os.path.join(optimizer_directory, \"NLP\", \"NLP_variable_flows.jl\"))\n j.using(\"Main.NLP: find_optimal_physical_parameters\")\n assert (hasattr(j, \"find_optimal_physical_parameters\"))\n nlp_start = time.time()\n NLP_Output, status = j.find_optimal_physical_parameters(GraphParam,\n self.conf,\n self.solver_log_file,\n self.energy,\n self.logger.info)\n nlp_end = time.time()\n self.logger.info(\"nlp time: %s\" % str(nlp_end - nlp_start))\n return NLP_Output, status", "def computer_driver_heuristic(self, pc):\n if pc == self.departure_location:\n return self.nearest_neigbor(pc)\n else:\n # encode state: State -> Generalized One hot vector\n # print(len(self.idx_to_pc)+1)\n encoded_vector = np.zeros(len(self.idx_to_pc)+1)\n\n # indices of locations FOR ENCODING\n pickup_jobs_idx = [self.pc_to_idx[p]+1 for p in list(self.state.P_k.keys())] # +1 is to make room for the time dim\n deliv_jobs_idx = [self.pc_to_idx[p]+1 for p in list(self.state.D_k.keys())]\n\n # indices of locations FOR PC READING\n pickup_jobs_idx_read = [self.pc_to_idx[p] for p in list(self.state.P_k.keys())]\n deliv_jobs_idx_read = [self.pc_to_idx[p] for p in list(self.state.D_k.keys())]\n tasks = set(pickup_jobs_idx_read + deliv_jobs_idx_read)\n\n if len(tasks) > 0:\n # set appropriate values at the index corresponding to the location\n encoded_vector[pickup_jobs_idx] = -0.5\n encoded_vector[deliv_jobs_idx] = 0.5\n encoded_vector[self.pc_to_idx[pc]+1] = 1\n\n # # current time encoded as nb of seconds between 12pm and now/nb seconds between 12pm and 12am\n total_nb_seconds = datetime.timedelta(hours=12, minutes=0)\n cur_time = self.state.t_k.time()\n cur_time = datetime.timedelta(hours=cur_time.hour, minutes=cur_time.minute) # nb of seconds from 12am\n # # TODO this can further be noramlized as most values will be >0 (>6am)\n cur_time = 2 * cur_time.seconds / total_nb_seconds.seconds - 1 # normalized time in [-1,1]\n encoded_vector[0] = cur_time\n\n # predict decision\n pred = self.heuristic_model.predict_proba(encoded_vector.reshape(1,-1))\n\n # take the most 
probable location among the remaining jobs\n # # set proba to 0 if location not among remaining jobs\n # print(\"##############\")\n # print(\"shape of pred \", pred.shape)\n # print(\"Number of locations considered : \", len(self.idx_to_pc))\n print(\"Possible indices to choose from : \", tasks)\n pred[0, list(set(range(0, len(self.idx_to_pc))) - set(pickup_jobs_idx_read + deliv_jobs_idx_read))] = 0\n\n idx_opt = np.argsort(pred[0,:])[-1] # most probable location (by its index) among remaining jobs\n print(\"Index chosen : \", idx_opt )\n return self.idx_to_pc[idx_opt]\n\n elif len(tasks) == 0:\n return 0\n\n else:\n raise ValueError('Problem with tasks, which has negative length...')", "def online_server_garbler_phase(env, pipe, storage):\n\n global clienf_inf_times, num_infs_completed, request_times, waiting_times, offline_times\n while True:\n request = yield pipe.get()\n start_time = env.now\n waiting_time = start_time - request['request_time'] \n waiting_times.append(waiting_time)\n \n before_gc = env.now\n yield storage.get(NUM_RELU)\n offline_times.append(env.now - before_gc)\n\n\n for i in range(len(utils.on_client_compute_relu)): # for i in range(nonlinear layers)...\n yield env.timeout(utils.on_client_write_linear[i] / bandwidth) # client sends x-r to server\n yield env.timeout(utils.on_server_compute_linear[i]) # server performs linear eval (conv)\n yield env.timeout(utils.on_server_write_relu[i] / bandwidth) # server sends encoded labels to client\n yield env.timeout(utils.on_client_compute_relu[i]) # client evaluates garbled circuit\n\n \n # send prediction to client\n yield env.timeout(utils.on_server_write_pred / bandwidth) # server sends prediction to client\n\n num_infs_completed +=1\n client_inf_times.append(env.now-start_time)", "def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. 
Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n 
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val", "def test_scaled_parallel_transport(self):\n \n for k in (1/11, -1/11, 11, -2):\n self._test_parallel_transport(k=k)", "def __init__(self):\n self.processes = 0\n self.resources = 0\n # Holds tuples: (x,y,z) x = process num, y = request/release, z = resource num.\n self.steps = []\n # Marks if resource is available or not.\n self.available = []\n # Adjacency Matrices for hold and request edges.\n self.hold_edges = []\n self.request_edges = []\n # Tracks where we are in the state of the system.\n self.state_num = 0\n # Wait-for graph that we perform DFS for detecting deadlock cycles.\n self.graph = Graph()\n # Connected vertices holding resource.\n self.connected_v = []\n # State stringified.\n self.state_string = [\" \"] * 2", "def topo_efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.topo_shortestpathij(i, j) == None):\n continue\n Temp += 1/self.topo_shortestpathij(i, j)\n \n self.topo_efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def cores_per_node(self):\n return self.cores_per_socket * self.sockets_per_node", "def solve(self, **kwargs):\n\n sorted_compute_nodes = sorted(\n self.physical.compute_nodes,\n key=lambda x: self.physical.cores(x) * 1000 + self.physical.memory(x),\n reverse=True,\n )\n\n for n_partitions_to_try in range(\n self.lower_bound(), len(sorted_compute_nodes) + 1\n ):\n\n # partitioning of virtual nodes in n_partitions_to_try partitions\n k_partition = get_partitions(\n self.virtual.g, 
n_partitions=n_partitions_to_try\n )\n\n # subset of hosts of size n_partitions_to_try\n chosen_physical = sorted_compute_nodes[:n_partitions_to_try]\n\n #\n # check if the partitioning is a feasible solution\n #\n try:\n # virtual nodes to physical nodes assignment\n res_node_mapping = {}\n\n # iterate over each pair (physical_node i, virtual nodes assigned to i)\n for physical_node, assigned_virtual_nodes in zip(\n chosen_physical, k_partition\n ):\n # keep track of the node physical resources used\n cores_used = memory_used = 0\n # check if node resources are not exceeded:\n for virtual_node in assigned_virtual_nodes:\n # cpu cores\n cores_used += self.virtual.req_cores(virtual_node)\n if self.physical.cores(physical_node) < cores_used:\n raise NodeResourceError(physical_node, \"cpu cores\")\n # memory\n memory_used += self.virtual.req_memory(virtual_node)\n if self.physical.memory(physical_node) < memory_used:\n raise NodeResourceError(physical_node, \"memory\")\n # assign the virtual nodes to a physical node\n res_node_mapping[virtual_node] = physical_node\n\n #\n # virtual links to physical links assignment\n #\n res_link_mapping = {}\n rate_used = defaultdict(int)\n\n # iterate over each virtual link between two virtual nodes not mapped on the same physical machine\n for (u, v) in (\n (u, v)\n for (u, v) in self.virtual.sorted_edges()\n if res_node_mapping[u] != res_node_mapping[v]\n ):\n\n res_link_mapping[(u, v)] = []\n\n # physical nodes on which u and v have been placed\n phy_u, phy_v = res_node_mapping[u], res_node_mapping[v]\n\n # for each link in the physical path\n for (i, j, device_id) in self.physical.find_path(\n phy_u,\n phy_v,\n req_rate=self.virtual.req_rate(u, v),\n used_rate=rate_used,\n ):\n # else update the rate\n rate_used[(i, j, device_id)] += self.virtual.req_rate(u, v)\n\n res_link_mapping[(u, v)].append((i, device_id, j))\n\n # build solution from the output\n self.solution = Solution.build_solution(\n self.virtual, self.physical, res_node_mapping, res_link_mapping\n )\n self.status = Solved\n return Solved\n\n except (NodeResourceError, NoPathFoundError):\n # unfeasible, increase the number of partitions to be used\n pass\n else:\n self.status = Infeasible\n return Infeasible", "def __initializeDistributed(self):\n self.raiseADebug(\"Initializing parallel InternalParallel: {0} Nodes: {1}\".format(self.runInfoDict['internalParallel'],len(self.runInfoDict['Nodes'])))\n if self._parallelLib != ParallelLibEnum.shared:\n # dashboard?\n db = self.runInfoDict['includeDashboard']\n # Check if the list of unique nodes is present and, in case, initialize the\n servers = None\n sys.path.append(self.runInfoDict['WorkingDir'])\n if 'UPDATE_PYTHONPATH' in self.runInfoDict:\n sys.path.extend([p.strip() for p in self.runInfoDict['UPDATE_PYTHONPATH'].split(\":\")])\n\n if _rayAvail:\n # update the python path and working dir\n olderPath = os.environ[\"PYTHONPATH\"].split(os.pathsep) if \"PYTHONPATH\" in os.environ else []\n os.environ[\"PYTHONPATH\"] = os.pathsep.join(set(olderPath+sys.path))\n\n # is ray instanciated outside?\n self.rayInstanciatedOutside = 'headNode' in self.runInfoDict\n self.daskInstanciatedOutside = 'schedulerFile' in self.runInfoDict\n if len(self.runInfoDict['Nodes']) > 0 or self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n availableNodes = [nodeId.strip() for nodeId in self.runInfoDict['Nodes']]\n uniqueN = list(set(availableNodes))\n # identify the local host name and get the number of local processors\n localHostName = 
self.__getLocalHost()\n self.raiseADebug(\"Head host name is : \", localHostName)\n # number of processors\n nProcsHead = availableNodes.count(localHostName)\n if not nProcsHead:\n self.raiseAWarning(\"# of local procs are 0. Only remote procs are avalable\")\n self.raiseAWarning(f'Head host name \"{localHostName}\" /= Avail Nodes \"'+', '.join(uniqueN)+'\"!')\n self.raiseADebug(\"# of local procs : \", str(nProcsHead))\n self.raiseADebug(\"# of total procs : \", str(len(availableNodes)))\n if nProcsHead != len(availableNodes) or self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n if self.rayInstanciatedOutside:\n address = self.runInfoDict['headNode']\n elif self.daskInstanciatedOutside:\n self.daskSchedulerFile = self.runInfoDict['schedulerFile']\n else:\n # create head node cluster\n # port 0 lets ray choose an available port\n address = self.__runHeadNode(nProcsHead, 0)\n if self._parallelLib == ParallelLibEnum.ray:\n # add names in runInfo\n self.runInfoDict['headNode'] = address\n self.raiseADebug(\"Head host IP :\", address)\n if self._parallelLib == ParallelLibEnum.dask:\n # add file in runInfo\n self.runInfoDict['schedulerFile'] = self.daskSchedulerFile\n self.raiseADebug('scheduler file :', self.daskSchedulerFile)\n ## Get servers and run ray or dask remote listener\n if self.rayInstanciatedOutside or self.daskInstanciatedOutside:\n servers = self.runInfoDict['remoteNodes']\n else:\n servers = self.__runRemoteListeningSockets(address, localHostName)\n # add names in runInfo\n self.runInfoDict['remoteNodes'] = servers\n if self._parallelLib == ParallelLibEnum.ray:\n ## initialize ray server with nProcs\n self._server = ray.init(address=address,log_to_driver=False,include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n if self.daskSchedulerFile is not None:\n #handle multinode and prestarted configurations\n self._server = dask.distributed.Client(scheduler_file=self.daskSchedulerFile)\n else:\n #Start locally\n cluster = dask.distributed.LocalCluster()\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseAWarning(\"No supported server\")\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"NODES IN THE CLUSTER : \", str(ray.nodes()))\n else:\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Executing RAY in the cluster but with a single node configuration\")\n self._server = ray.init(num_cpus=nProcsHead,log_to_driver=False,include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n self.raiseADebug(\"Executing DASK in the cluster but with a single node configuration\")\n #Start locally\n cluster = dask.distributed.LocalCluster()\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseADebug(\"Initializing\", str(self._parallelLib), \"locally with num_cpus: \", self.runInfoDict['totalNumCoresUsed'])\n if self._parallelLib == ParallelLibEnum.ray:\n self._server = ray.init(num_cpus=int(self.runInfoDict['totalNumCoresUsed']),include_dashboard=db)\n elif self._parallelLib == ParallelLibEnum.dask:\n #handle local method\n cluster = dask.distributed.LocalCluster(n_workers=int(self.runInfoDict['totalNumCoresUsed']))\n self._server = dask.distributed.Client(cluster)\n else:\n self.raiseAWarning(\"parallellib creation not handled\")\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Head node IP address: \", self._server.address_info['node_ip_address'])\n self.raiseADebug(\"Redis address : \", self._server.address_info['redis_address'])\n self.raiseADebug(\"Object store 
address: \", self._server.address_info['object_store_address'])\n self.raiseADebug(\"Raylet socket name : \", self._server.address_info['raylet_socket_name'])\n self.raiseADebug(\"Session directory : \", self._server.address_info['session_dir'])\n self.raiseADebug(\"GCS Address : \", self._server.address_info['gcs_address'])\n if servers:\n self.raiseADebug(\"# of remote servers : \", str(len(servers)))\n self.raiseADebug(\"Remote servers : \", \" , \".join(servers))\n else:\n self.raiseADebug(\"JobHandler initialized without ray\")\n else:\n ## We are just using threading\n self._server = None\n self.raiseADebug(\"JobHandler initialized with threading\")\n # ray or dask is initialized\n self.__isDistributedInitialized = True", "def main(config, black_box_function=None, output_file=\"\"):\r\n start_time = (datetime.datetime.now())\r\n run_directory = config[\"run_directory\"]\r\n hypermapper_mode = config[\"hypermapper_mode\"][\"mode\"]\r\n\r\n # Start logging\r\n log_file = deal_with_relative_and_absolute_path(run_directory, config[\"log_file\"])\r\n sys.stdout.change_log_file(log_file)\r\n if (hypermapper_mode == 'client-server'):\r\n sys.stdout.switch_log_only_on_file(True)\r\n\r\n # Log the json configuration for this optimization\r\n sys.stdout.write_to_logfile(str(config) + \"\\n\")\r\n\r\n # Create parameter space object and unpack hyperparameters from json\r\n param_space = space.Space(config)\r\n application_name = config[\"application_name\"]\r\n optimization_metrics = config[\"optimization_objectives\"]\r\n optimization_iterations = config[\"optimization_iterations\"]\r\n evaluations_per_optimization_iteration = config[\"evaluations_per_optimization_iteration\"]\r\n batch_mode = evaluations_per_optimization_iteration > 1\r\n number_of_cpus = config[\"number_of_cpus\"]\r\n print_importances = config[\"print_parameter_importance\"]\r\n epsilon_greedy_threshold = config[\"epsilon_greedy_threshold\"]\r\n acquisition_function = config[\"acquisition_function\"]\r\n weight_sampling = config[\"weight_sampling\"]\r\n scalarization_method = config[\"scalarization_method\"]\r\n scalarization_key = config[\"scalarization_key\"]\r\n doe_type = config[\"design_of_experiment\"][\"doe_type\"]\r\n number_of_doe_samples = config[\"design_of_experiment\"][\"number_of_samples\"]\r\n model_type = config[\"models\"][\"model\"]\r\n optimization_method = config[\"optimization_method\"]\r\n time_budget = config[\"time_budget\"]\r\n input_params = param_space.get_input_parameters()\r\n number_of_objectives = len(optimization_metrics)\r\n objective_limits = {}\r\n data_array = {}\r\n fast_addressing_of_data_array = {}\r\n objective_bounds = None\r\n exhaustive_search_data_array = None\r\n normalize_objectives = False\r\n debug = False\r\n\r\n if \"feasible_output\" in config:\r\n feasible_output = config[\"feasible_output\"]\r\n feasible_output_name = feasible_output[\"name\"]\r\n enable_feasible_predictor = feasible_output[\"enable_feasible_predictor\"]\r\n enable_feasible_predictor_grid_search_on_recall_and_precision = feasible_output[\"enable_feasible_predictor_grid_search_on_recall_and_precision\"]\r\n feasible_predictor_grid_search_validation_file = feasible_output[\"feasible_predictor_grid_search_validation_file\"]\r\n feasible_parameter = param_space.get_feasible_parameter()\r\n number_of_trees = config[\"models\"][\"number_of_trees\"]\r\n\r\n if (weight_sampling == \"bounding_box\"):\r\n objective_bounds = {}\r\n user_bounds = config[\"bounding_box_limits\"]\r\n if (len(user_bounds) == 2):\r\n if 
(user_bounds[0] > user_bounds[1]):\r\n user_bounds[0], user_bounds[1] = user_bounds[1], user_bounds[0]\r\n for objective in optimization_metrics:\r\n objective_bounds[objective] = user_bounds\r\n objective_limits[objective] = user_bounds\r\n elif (len(user_bounds) == number_of_objectives*2):\r\n idx = 0\r\n for objective in optimization_metrics:\r\n objective_bounds[objective] = user_bounds[idx:idx+2]\r\n if (objective_bounds[objective][0] > objective_bounds[objective][1]):\r\n objective_bounds[objective][0], objective_bounds[objective][1] = objective_bounds[objective][1], objective_bounds[objective][0]\r\n objective_limits[objective] = objective_bounds[objective]\r\n idx += 2\r\n else:\r\n print(\"Wrong number of bounding boxes, expected 2 or\", 2*number_of_objectives, \"got\", len(user_bounds))\r\n raise SystemExit\r\n else:\r\n for objective in optimization_metrics:\r\n objective_limits[objective] = [float(\"inf\"), float(\"-inf\")]\r\n\r\n if output_file == \"\":\r\n output_data_file = config[\"output_data_file\"]\r\n if output_data_file == \"output_samples.csv\":\r\n output_data_file = application_name + \"_\" + output_data_file\r\n else:\r\n output_data_file = output_file\r\n\r\n exhaustive_search_data_array = None\r\n exhaustive_search_fast_addressing_of_data_array = None\r\n if hypermapper_mode == 'exhaustive':\r\n exhaustive_file = config[\"hypermapper_mode\"][\"exhaustive_search_file\"]\r\n exhaustive_search_data_array, exhaustive_search_fast_addressing_of_data_array = param_space.load_data_file(exhaustive_file, debug=False, number_of_cpus=number_of_cpus)\r\n\r\n # Check if some parameters are correctly defined\r\n if hypermapper_mode == \"default\":\r\n if black_box_function == None:\r\n print(\"Error: the black box function must be provided\")\r\n raise SystemExit\r\n if not callable(black_box_function):\r\n print(\"Error: the black box function parameter is not callable\")\r\n raise SystemExit\r\n\r\n if (model_type == \"gaussian_process\") and (acquisition_function == \"TS\"):\r\n print(\"Error: The TS acquisition function with Gaussian Process models is still under implementation\")\r\n print(\"Using EI acquisition function instead\")\r\n config[\"acquisition_function\"] = \"EI\"\r\n\r\n if number_of_cpus > 1:\r\n print(\"Warning: HyperMapper supports only sequential execution for now. 
Running on a single cpu.\")\r\n number_of_cpus = 1\r\n\r\n # If priors are present, use prior-guided optimization\r\n user_priors = False\r\n for input_param in config[\"input_parameters\"]:\r\n if config[\"input_parameters\"][input_param][\"prior\"] != \"uniform\":\r\n if number_of_objectives == 1:\r\n user_priors = True\r\n else:\r\n print(\"Warning: prior optimization does not work with multiple objectives yet, priors will be uniform\")\r\n config[\"input_parameters\"][input_param][\"prior\"] = \"uniform\"\r\n\r\n if user_priors:\r\n bo_method = prior_guided_optimization\r\n else:\r\n bo_method = random_scalarizations\r\n normalize_objectives = True\r\n\r\n ### Resume previous optimization, if any\r\n beginning_of_time = param_space.current_milli_time()\r\n absolute_configuration_index = 0\r\n doe_t0 = datetime.datetime.now()\r\n if config[\"resume_optimization\"] == True:\r\n resume_data_file = config[\"resume_optimization_data\"]\r\n\r\n if not resume_data_file.endswith('.csv'):\r\n print(\"Error: resume data file must be a CSV\")\r\n raise SystemExit\r\n if resume_data_file == \"output_samples.csv\":\r\n resume_data_file = application_name + \"_\" + resume_data_file\r\n\r\n data_array, fast_addressing_of_data_array = param_space.load_data_file(resume_data_file, debug=False, number_of_cpus=number_of_cpus)\r\n absolute_configuration_index = len(data_array[list(data_array.keys())[0]]) # get the number of points evaluated in the previous run\r\n beginning_of_time = beginning_of_time - data_array[param_space.get_timestamp_parameter()[0]][-1] # Set the timestamp back to match the previous run\r\n print(\"Resumed optimization, number of samples = %d .......\" % absolute_configuration_index)\r\n\r\n ### DoE phase\r\n if absolute_configuration_index < number_of_doe_samples:\r\n configurations = []\r\n default_configuration = param_space.get_default_or_random_configuration()\r\n str_data = param_space.get_unique_hash_string_from_values(default_configuration)\r\n if str_data not in fast_addressing_of_data_array:\r\n fast_addressing_of_data_array[str_data] = absolute_configuration_index\r\n configurations.append(default_configuration)\r\n absolute_configuration_index += 1\r\n\r\n doe_configurations = []\r\n if absolute_configuration_index < number_of_doe_samples:\r\n doe_configurations = param_space.get_doe_sample_configurations(\r\n fast_addressing_of_data_array,\r\n number_of_doe_samples-absolute_configuration_index,\r\n doe_type)\r\n configurations += doe_configurations\r\n print(\"Design of experiment phase, number of new doe samples = %d .......\" % len(configurations))\r\n\r\n doe_data_array = param_space.run_configurations(\r\n hypermapper_mode,\r\n configurations,\r\n beginning_of_time,\r\n black_box_function,\r\n exhaustive_search_data_array,\r\n exhaustive_search_fast_addressing_of_data_array,\r\n run_directory,\r\n batch_mode=batch_mode)\r\n data_array = concatenate_data_dictionaries(\r\n data_array,\r\n doe_data_array,\r\n param_space.input_output_and_timestamp_parameter_names)\r\n absolute_configuration_index = number_of_doe_samples\r\n iteration_number = 1\r\n else:\r\n iteration_number = absolute_configuration_index - number_of_doe_samples + 1\r\n\r\n # If we have feasibility constraints, we must ensure we have at least one feasible and one infeasible sample before starting optimization\r\n # If this is not true, continue design of experiment until the condition is met\r\n if enable_feasible_predictor:\r\n while are_all_elements_equal(data_array[feasible_parameter[0]]) and 
optimization_iterations > 0:\r\n print(\"Warning: all points are either valid or invalid, random sampling more configurations.\")\r\n print(\"Number of doe samples so far:\", absolute_configuration_index)\r\n configurations = param_space.get_doe_sample_configurations(fast_addressing_of_data_array, 1, \"random sampling\")\r\n new_data_array = param_space.run_configurations(\r\n hypermapper_mode,\r\n configurations,\r\n beginning_of_time,\r\n black_box_function,\r\n exhaustive_search_data_array,\r\n exhaustive_search_fast_addressing_of_data_array,\r\n run_directory,\r\n batch_mode=batch_mode)\r\n data_array = concatenate_data_dictionaries(\r\n new_data_array,\r\n data_array,\r\n param_space.input_output_and_timestamp_parameter_names)\r\n absolute_configuration_index += 1\r\n optimization_iterations -= 1\r\n\r\n # Create output file with explored configurations from resumed run and DoE\r\n with open(deal_with_relative_and_absolute_path(run_directory, output_data_file), 'w') as f:\r\n w = csv.writer(f)\r\n w.writerow(param_space.get_input_output_and_timestamp_parameters())\r\n tmp_list = [param_space.convert_types_to_string(j, data_array) for j in param_space.get_input_output_and_timestamp_parameters()]\r\n tmp_list = list(zip(*tmp_list))\r\n for i in range(len(data_array[optimization_metrics[0]])):\r\n w.writerow(tmp_list[i])\r\n\r\n for objective in optimization_metrics:\r\n lower_bound = min(objective_limits[objective][0], min(data_array[objective]))\r\n upper_bound = max(objective_limits[objective][1], max(data_array[objective]))\r\n objective_limits[objective] = [lower_bound, upper_bound]\r\n print(\"\\nEnd of doe/resume phase, the number of evaluated configurations is: %d\\n\" %absolute_configuration_index)\r\n sys.stdout.write_to_logfile((\"End of DoE - Time %10.4f sec\\n\" % ((datetime.datetime.now() - doe_t0).total_seconds())))\r\n if doe_type == \"grid_search\" and optimization_iterations > 0:\r\n print(\"Warning: DoE is grid search, setting number of optimization iterations to 0\")\r\n optimization_iterations = 0\r\n\r\n ### Main optimization loop\r\n bo_t0 = datetime.datetime.now()\r\n run_time = (datetime.datetime.now() - start_time).total_seconds() / 60\r\n # run_time / time_budget < 1 if budget > elapsed time or budget == -1\r\n if time_budget > 0:\r\n print('starting optimization phase, limited to run for ', time_budget, ' minutes')\r\n elif time_budget == 0:\r\n print('Time budget cannot be zero. 
To not limit runtime set time_budget = -1')\r\n sys.exit()\r\n\r\n configurations = []\r\n evaluation_budget = optimization_iterations * evaluations_per_optimization_iteration\r\n iteration_number = 0\r\n evaluation_count = 0\r\n while evaluation_count < evaluation_budget and run_time / time_budget < 1:\r\n if evaluation_count % evaluations_per_optimization_iteration == 0:\r\n iteration_number += 1\r\n print(\"Starting optimization iteration\", iteration_number)\r\n iteration_t0 = datetime.datetime.now()\r\n\r\n model_t0 = datetime.datetime.now()\r\n regression_models,_,_ = models.generate_mono_output_regression_models(\r\n data_array,\r\n param_space,\r\n input_params,\r\n optimization_metrics,\r\n 1.00,\r\n config,\r\n model_type=model_type,\r\n number_of_cpus=number_of_cpus,\r\n print_importances=print_importances,\r\n normalize_objectives=normalize_objectives,\r\n objective_limits=objective_limits)\r\n\r\n classification_model = None\r\n if enable_feasible_predictor:\r\n classification_model,_,_ = models.generate_classification_model(application_name,\r\n param_space,\r\n data_array,\r\n input_params,\r\n feasible_parameter,\r\n 1.00,\r\n config,\r\n debug,\r\n number_of_cpus=number_of_cpus,\r\n data_array_exhaustive=exhaustive_search_data_array,\r\n enable_feasible_predictor_grid_search_on_recall_and_precision=enable_feasible_predictor_grid_search_on_recall_and_precision,\r\n feasible_predictor_grid_search_validation_file=feasible_predictor_grid_search_validation_file,\r\n print_importances=print_importances)\r\n model_t1 = datetime.datetime.now()\r\n sys.stdout.write_to_logfile((\"Model fitting time %10.4f sec\\n\" % ((model_t1 - model_t0).total_seconds())))\r\n if (weight_sampling == \"bounding_box\"):\r\n objective_weights = sample_weight_bbox(optimization_metrics, objective_bounds, objective_limits, 1)[0]\r\n elif (weight_sampling == \"flat\"):\r\n objective_weights = sample_weight_flat(optimization_metrics, 1)[0]\r\n else:\r\n print(\"Error: unrecognized option:\", weight_sampling)\r\n raise SystemExit\r\n\r\n data_array_scalarization, _ = compute_data_array_scalarization(\r\n data_array,\r\n objective_weights,\r\n objective_limits,\r\n scalarization_method)\r\n data_array[scalarization_key] = data_array_scalarization.tolist()\r\n\r\n epsilon = random.uniform(0,1)\r\n local_search_t0 = datetime.datetime.now()\r\n if epsilon > epsilon_greedy_threshold:\r\n best_configuration = bo_method(\r\n config,\r\n data_array,\r\n param_space,\r\n fast_addressing_of_data_array,\r\n regression_models,\r\n iteration_number,\r\n objective_weights,\r\n objective_limits,\r\n classification_model)\r\n\r\n else:\r\n sys.stdout.write_to_logfile(str(epsilon) + \" < \" + str(epsilon_greedy_threshold) + \" random sampling a configuration to run\\n\")\r\n tmp_fast_addressing_of_data_array = copy.deepcopy(fast_addressing_of_data_array)\r\n best_configuration = param_space.random_sample_configurations_without_repetitions(tmp_fast_addressing_of_data_array, 1)[0]\r\n local_search_t1 = datetime.datetime.now()\r\n sys.stdout.write_to_logfile((\"Local search time %10.4f sec\\n\" % ((local_search_t1 - local_search_t0).total_seconds())))\r\n\r\n configurations.append(best_configuration)\r\n\r\n # When we have selected \"evaluations_per_optimization_iteration\" configurations, evaluate the batch\r\n if evaluation_count % evaluations_per_optimization_iteration == (evaluations_per_optimization_iteration - 1):\r\n black_box_function_t0 = datetime.datetime.now()\r\n new_data_array = param_space.run_configurations(\r\n 
hypermapper_mode,\r\n configurations,\r\n beginning_of_time,\r\n black_box_function,\r\n exhaustive_search_data_array,\r\n exhaustive_search_fast_addressing_of_data_array,\r\n run_directory,\r\n batch_mode=batch_mode)\r\n black_box_function_t1 = datetime.datetime.now()\r\n sys.stdout.write_to_logfile((\"Black box function time %10.4f sec\\n\" % ((black_box_function_t1 - black_box_function_t0).total_seconds())))\r\n\r\n # If running batch BO, we will have some liars in fast_addressing_of_data, update them with the true value\r\n for configuration_idx in range(len(new_data_array[list(new_data_array.keys())[0]])):\r\n configuration = get_single_configuration(new_data_array, configuration_idx)\r\n str_data = param_space.get_unique_hash_string_from_values(configuration)\r\n if str_data in fast_addressing_of_data_array:\r\n absolute_index = fast_addressing_of_data_array[str_data]\r\n for header in configuration:\r\n data_array[header][absolute_index] = configuration[header]\r\n else:\r\n fast_addressing_of_data_array[str_data] = absolute_configuration_index\r\n absolute_configuration_index += 1\r\n for header in configuration:\r\n data_array[header].append(configuration[header])\r\n\r\n # and save results\r\n with open(deal_with_relative_and_absolute_path(run_directory, output_data_file), 'a') as f:\r\n w = csv.writer(f)\r\n tmp_list = [param_space.convert_types_to_string(j, new_data_array) for j in list(param_space.get_input_output_and_timestamp_parameters())]\r\n tmp_list = list(zip(*tmp_list))\r\n for i in range(len(new_data_array[optimization_metrics[0]])):\r\n w.writerow(tmp_list[i])\r\n configurations = []\r\n else:\r\n # If we have not selected all points in the batch yet, add the model prediction as a 'liar'\r\n for header in best_configuration:\r\n data_array[header].append(best_configuration[header])\r\n\r\n bufferx = [tuple(best_configuration.values())]\r\n prediction_means, _ = models.compute_model_mean_and_uncertainty(bufferx, regression_models, model_type, param_space)\r\n for objective in prediction_means:\r\n data_array[objective].append(prediction_means[objective][0])\r\n\r\n if classification_model is not None:\r\n classification_prediction_results = models.model_probabilities(bufferx,classification_model,param_space)\r\n true_value_index = classification_model[feasible_parameter[0]].classes_.tolist().index(True)\r\n feasibility_indicator = classification_prediction_results[feasible_parameter[0]][:,true_value_index]\r\n data_array[feasible_output_name].append(True if feasibility_indicator[0] >= 0.5 else False)\r\n\r\n data_array[param_space.get_timestamp_parameter()[0]].append(absolute_configuration_index)\r\n str_data = param_space.get_unique_hash_string_from_values(best_configuration)\r\n fast_addressing_of_data_array[str_data] = absolute_configuration_index\r\n absolute_configuration_index += 1\r\n\r\n\r\n for objective in optimization_metrics:\r\n lower_bound = min(objective_limits[objective][0], min(data_array[objective]))\r\n upper_bound = max(objective_limits[objective][1], max(data_array[objective]))\r\n objective_limits[objective] = [lower_bound, upper_bound]\r\n\r\n evaluation_count += 1\r\n run_time = (datetime.datetime.now() - start_time).total_seconds() / 60\r\n\r\n sys.stdout.write_to_logfile((\"Total iteration time %10.4f sec\\n\" % ((datetime.datetime.now() - iteration_t0).total_seconds())))\r\n sys.stdout.write_to_logfile((\"End of BO phase - Time %10.4f sec\\n\" % ((datetime.datetime.now() - bo_t0).total_seconds())))\r\n\r\n print(\"End of Bayesian 
Optimization\")\r\n sys.stdout.write_to_logfile((\"Total script time %10.2f sec\\n\" % ((datetime.datetime.now() - start_time).total_seconds())))", "def _calc_desc_and_queues(\n total_numa_nodes, total_ports_per_numa, total_rx_queues, ports_per_numa_value\n ):\n\n # Get the number of rx queues\n rx_queues = max(1, total_rx_queues)\n tx_queues = rx_queues * total_numa_nodes + 1\n\n # Get the descriptor entries\n desc_entries = 1024\n ports_per_numa_value[\"rx_queues\"] = rx_queues\n total_mbufs = (\n (rx_queues * desc_entries) + (tx_queues * desc_entries)\n ) * total_ports_per_numa\n\n return total_mbufs", "def _freeze_stages(self) -> None:\n if self.frozen_stages >= 0:\n if self.deep_stem:\n self.stem.eval()\n for param in self.stem.parameters():\n param.requires_grad = False\n else:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False", "def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false 
forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False", "def _setup_arrival_calcs(metrics_client: HeronMetricsClient,\n graph_client: GremlinClient,\n topology_id: str, cluster: str, environ: str,\n topology_ref: str, start: dt.datetime,\n end: dt.datetime, io_bucket_length: int,\n tracker_url: str, **kwargs: Union[str, int, float]\n ) -> Tuple[pd.DataFrame, List[List[Vertex]],\n pd.DataFrame, Dict[Vertex, List[int]],\n Dict[Vertex, List[int]]]:\n\n topo_traversal: GraphTraversalSource = \\\n graph_client.topology_subgraph(topology_id, topology_ref)\n\n # Calculate the routing probabilities for the defined metric gathering\n # period\n i2i_rps: pd.Series = (calculate_inter_instance_rps(\n metrics_client, topology_id, cluster, environ, start, end, tracker_url,\n **kwargs).set_index([\"source_task\", \"destination_task\", \"stream\"])\n [\"routing_probability\"])\n\n # Get the vertex levels for the logical graph tree\n LOG.info(\"Calculating levels for topology %s reference %s\", topology_id,\n topology_ref)\n levels: List[List[Vertex]] = get_levels(topo_traversal)\n LOG.debug(\"Found %d levels is topology %s reference %s\", len(levels),\n topology_id, topology_ref)\n\n # Calculate the input output ratios for each instances using data from the\n # defined metrics gathering period\n coefficients: pd.Series = lstsq_io_ratios(\n metrics_client, graph_client, topology_id, cluster, environ, start,\n end, io_bucket_length, **kwargs).set_index([\"task\", \"output_stream\",\n \"input_stream\",\n \"source_component\"]\n )[\"coefficient\"]\n\n # Get the details of the incoming and outgoing physical connections for\n # stream manager in the topology\n\n # Get a dictionary mapping from stream manager id string to a list of the\n # instances (within each container) that will send tuples to each stream\n # manager\n sending_instances: Dict[Vertex, List[int]] = \\\n (topo_traversal.V().hasLabel(\"stream_manager\")\n .group().by(\"id\").by(in_(\"physically_connected\")\n .hasLabel(P.within(\"spout\", \"bolt\"))\n .values(\"task_id\")\n .fold())\n .next())\n\n # Get a dictionary mapping from stream manager id string to a list of the\n # instances (within each container) that will receive tuples from each\n # stream manager\n receiving_instances: Dict[Vertex, List[int]] = \\\n (topo_traversal.V().hasLabel(\"stream_manager\")\n .group().by(\"id\").by(out(\"physically_connected\")\n .hasLabel(\"bolt\").values(\"task_id\").fold())\n .next())\n\n return (i2i_rps, levels, coefficients, sending_instances,\n receiving_instances)", "def hopper(local_dir, cpus, gpus, num_parallel, 
num_samples):\n\n # Final Version\n\n from design_baselines.mins import mins\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n tune.run(mins, config={\n \"logging_dir\": \"data\",\n \"task\": \"HopperController-Exact-v0\",\n \"task_kwargs\": {\"relabel\": False},\n \"val_size\": 200,\n \"offline\": True,\n \"normalize_ys\": True,\n \"normalize_xs\": True,\n \"base_temp\": 0.1,\n \"noise_std\": 0.0,\n \"method\": \"wasserstein\",\n \"use_conv\": False,\n \"gan_batch_size\": 128,\n \"hidden_size\": 1024,\n \"num_layers\": 1,\n \"bootstraps\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"oracle_lr\": 0.001,\n \"oracle_batch_size\": 128,\n \"oracle_epochs\": 100,\n \"latent_size\": 32,\n \"critic_frequency\": 10,\n \"flip_frac\": 0,\n \"fake_pair_frac\": 0.,\n \"penalty_weight\": 10.,\n \"generator_lr\": 2e-4,\n \"generator_beta_1\": 0.0,\n \"generator_beta_2\": 0.9,\n \"discriminator_lr\": 2e-4,\n \"discriminator_beta_1\": 0.0,\n \"discriminator_beta_2\": 0.9,\n \"initial_epochs\": 500,\n \"epochs_per_iteration\": 0,\n \"iterations\": 0,\n \"exploration_samples\": 0,\n \"exploration_rate\": 0.,\n \"thompson_samples\": 0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def compute_network_load(env: ControlledRandomWalk) -> float:\n rho = cvx.Variable()\n zeta = cvx.Variable((env.num_activities, 1))\n objective = cvx.Minimize(rho)\n constraints = [\n env.job_generator.buffer_processing_matrix * zeta + env.job_generator.demand_rate\n == np.zeros((env.num_buffers, 1)),\n env.constituency_matrix * zeta <= rho * np.ones((env.num_resources, 1)),\n zeta >= np.zeros((env.num_activities, 1))\n ]\n prob = cvx.Problem(objective, constraints)\n network_load = prob.solve(solver=cvx.SCS, eps=1e-7)\n return network_load", "def evalPart(origTopo, capacities, subTopos):\r\n # print \"\\t===== Evaluate Partitioning =====\"\r\n numTopos = len(subTopos)\r\n numPM = len(capacities)\r\n if (numTopos > numPM):\r\n logger.error(\"Number of sub topologies does not match number of PMs\")\r\n exit()\r\n \r\n weights = {x:0 for x in range(numPM)}\r\n cutWeights = {x:0 for x in range(numPM)}\r\n subLinks = list(itertools.chain(*[subTopos[x].links(sort=True) for x in range(numTopos)]))\r\n cuts = [x for x in origTopo.links(sort=True) if x not in subLinks]\r\n\r\n for i in range(numTopos):\r\n weights[i] = calcTopoWeight(subTopos[i])\r\n cutWeights[i] = 0\r\n\r\n # for i in range(numTopos):\r\n # weights[i] = 0.0\r\n # for link in subTopos[i].links():\r\n # if origTopo.isSwitch(link[0]) and origTopo.isSwitch(link[1]):\r\n # weights[i] = weights[i] + subTopos[i].linkInfo(link[0], link[1])[\"bw\"]\r\n \r\n for link in cuts:\r\n for i in range(numTopos):\r\n if link[0] in subTopos[i].switches() or link[1] in subTopos[i].switches():\r\n weights[i] = weights[i] + origTopo.linkInfo(link[0], link[1])[\"bw\"]\r\n cutWeights[i] = cutWeights[i] + origTopo.linkInfo(link[0], link[1])[\"bw\"]\r\n\r\n return [weights, cutWeights]\r\n # return sorted(weights.values(), reverse=True)\r\n # wSum = sum(weights.values())\r\n # print \"\\tPart\\tCap\\tWeight\\tFraction\"\r\n # for x in range(numPM):\r\n # print \"\\t%d\\t%.1f\\t%.1f\\t%.4f\" % (x, capacities[x], weights[x], weights[x]/wSum)\r", "def collect_pipeline_runs(self):\n db = self.mongo_client.metalearning\n collection = db.pipeline_runs\n collection_size = 
collection.count()\n pipeline_cursor = collection.find()\n list_of_experiments = {\"classification\": [], \"regression\": []}\n for index, pipeline_run in enumerate(pipeline_cursor):\n if index % 1000 == 0:\n print(\"At {} out of {} documents\".format(index, collection_size))\n # if index == 2000:\n # # running into memory errors\n # break\n pipeline_run_info = self.get_pipeline_run_info(pipeline_run)\n metafeatures = self.get_metafeature_info(pipeline_run)\n # TODO: get all metafeatures so we don't need this\n if metafeatures != {}:\n experiment_json = dict(pipeline_run_info, **metafeatures)\n list_of_experiments[experiment_json[\"problem_type\"]].append(experiment_json)\n\n for problem_type in list_of_experiments.keys():\n final_data_file = json.dumps(list_of_experiments[problem_type], sort_keys=True, indent=4, default=json_util.default)\n with open(\"data/complete_pipelines_and_metafeatures_test_{}.json\".format(problem_type), \"w\") as file:\n file.write(final_data_file)\n\n return", "def parallelization_factor(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"parallelization_factor\")", "def set_optimization_priority(self):\n\n if len(self.groups) == 0 and len(self.servers) == 0:\n return\n\n if self.resource.CPU_avail > 0:\n app_cpu_weight = float(self.total_CPU) / float(self.resource.CPU_avail)\n else:\n if self.total_CPU > 0:\n app_cpu_weight = 1.0\n else:\n app_cpu_weight = 0.0\n\n if self.resource.mem_avail > 0:\n app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail)\n else:\n if self.total_mem > 0:\n app_mem_weight = 1.0\n else:\n app_mem_weight = 0.0\n\n if self.resource.local_disk_avail > 0:\n app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail)\n else:\n if self.total_local_vol > 0:\n app_local_vol_weight = 1.0\n else:\n app_local_vol_weight = 0.0\n\n opt = [(\"cpu\", app_cpu_weight),\n (\"mem\", app_mem_weight),\n (\"lvol\", app_local_vol_weight)]\n\n self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True)", "def prob_service(state, lambda_1, mu, num_of_servers):\n return (min(state[1], num_of_servers) * mu) / (\n lambda_1 + (mu * min(state[1], num_of_servers))\n )", "def main_proc(self, ds=5.0):\n\n # Preprocessing\n # downsampling edge pixels\n pcd_t_ds = self.pcd_t.voxel_down_sample(voxel_size=ds)\n pcd_t_ds, center_t = centering(pcd_t_ds)\n self.result_id = 0\n reg_trans = None\n\n self.pcd_registrated = list() # results of ICP\n for i in range(len(self.pcd_s)):\n self.pcd_s[i].paint_uniform_color([0.0, 0.0, 1.0])\n pcd_s_ds = self.pcd_s[i].voxel_down_sample(voxel_size=ds)\n\n pcd_s_ds, center_s = centering(pcd_s_ds)\n ts_c = np.identity(4)\n ts_c[:3, 3] = -center_s\n tt_c = np.identity(4)\n tt_c[:3, 3] = center_t\n\n # Registration by ICP algorithm\n reg = ICPRegistration(pcd_s_ds, pcd_t_ds)\n reg.set_distance_tolerance(ds * 0.5)\n mse, rt = reg.registration()\n if mse < self.mse:\n self.result_id = i\n print(\"Init:\", self.initial_angles[i], self.mse, \"==>\", mse)\n self.mse = mse\n reg_trans = rt\n TT = np.dot(reg_trans, ts_c)\n self.trans_final = np.dot(tt_c, TT)\n\n # check transformation progress\n \"\"\"\n hoge = copy.deepcopy(pcd_s_ds)\n hoge.paint_uniform_color([1,0,0])\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100., origin=[0.0,0.0,0.0])\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n hoge.transform( rt )\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, 
height=500)\n \"\"\"\n\n self.pcds = reg.pcds\n self.d = reg.d\n # Get registration result\n # translation[x,y] and rotation\n _, _, rotate = mat2rpy(self.trans_final)\n print(\"Initial angle is:\", self.initial_angles[self.result_id])\n rotate = np.radians(self.initial_angles[self.result_id]) + rotate\n translation = self.trans_final[:2, 3]\n\n # Choose the direction that results in the smaller rotation\n if rotate > tau:\n rotate -= tau\n elif rotate < 0:\n rotate += tau\n\n self.rotate = rotate\n return self.rotate, translation, self.mse", "def process_parameters(task, addr_space, model, export_path, alpha):\n\n global recovered_c_structs\n global recovered_python_objects\n global false_positives\n global hyperparameters\n\n all_layers = []\n shape = OrderedDict()\n name_to_weights = {}\n tot_num_elements = 0\n tensor_offsets = {}\n\n all_layers = bfs(model)\n \n for path, layer in all_layers:\n layer_dict = layer.in_dict.dereference().val\n layer_name = layer.ob_type.dereference().name\n recovered_python_objects += 1\n\n print\n print path, layer.ob_type.dereference().name\n\n if \"Dropout\" in layer_name:\n shape[path] = layer_dict['p'] # dropout rate\n recovered_python_objects += 1\n hyperparameters += 1\n print \"Dropout Rate:\", shape[path]\n\n elif \"ReLU\" in layer_name:\n shape[path] = None\n\n elif layer_dict['_parameters'].ma_used == 0 and layer_dict['_buffers'].ma_used == 0:\n shape[path] = None\n print \"No Weights\"\n continue\n \n if layer_dict['_parameters'].ma_used > 0:\n tensor_dict = layer_dict['_parameters'].val\n for key in tensor_dict:\n if tensor_dict[key] == None:\n continue\n tensor = tensor_dict[key].tensor.dereference()\n uid = path + \".\" + key\n print \"Path:\", uid\n print \"Num Elements:\", tensor.num_elements\n print \"Shape:\", tensor.shape\n recovered_python_objects += 1\n recovered_c_structs += 2\n shape[uid] = tensor.shape\n final_addr = tensor.storage.buf\n name_to_weights[uid] = extract_data(addr_space, tensor.num_elements, final_addr)\n tensor_offsets[uid] = int(tensor.obj_offset)\n tot_num_elements += tensor.num_elements\n\n if layer_dict['_buffers'].ma_used > 0:\n tensor_dict = layer_dict['_buffers'].val\n for key in tensor_dict:\n if tensor_dict[key] == None:\n continue\n tensor = tensor_dict[key].tensor.dereference()\n uid = path + \".\" + key\n print \"Path:\", uid\n print \"Num Elements:\", tensor.num_elements\n print \"Shape:\", tensor.shape\n recovered_python_objects += 1\n recovered_c_structs += 2\n shape[uid] = tensor.shape\n final_addr = tensor.storage.dereference().buf\n if key != \"num_batches_tracked\":\n name_to_weights[uid] = extract_data(addr_space, tensor.num_elements, final_addr)\n else:\n found_object = obj.Object(\"int\",\n offset=final_addr,\n vm=addr_space)\n name_to_weights[uid] = [int(found_object)]\n print name_to_weights[uid]\n tensor_offsets[uid] = int(tensor.obj_offset)\n tot_num_elements += tensor.num_elements\n\n export_weights(task, name_to_weights, tot_num_elements, export_path, alpha, str(task.pid))\n export_offsets(task, tensor_offsets, export_path, alpha)\n\n print \"\\nMODEL SUMMARY\"\n for key in shape:\n print key\n print shape[key]\n print\n\n print \"\\nEVAL TABLE SUMMARY\"\n print \"Layers:\", len(all_layers)\n print \"Tensors:\", len(name_to_weights)\n print \"Weights:\", tot_num_elements\n print \"Hyper Parameters:\", hyperparameters\n print \"Precision:\", len(name_to_weights), \"/\", len(name_to_weights) + false_positives, \"=\", float(len(name_to_weights)) / float(len(name_to_weights) + 
false_positives)\n print \"Python Objects:\", recovered_python_objects\n print \"C Structs:\", recovered_c_structs", "def outputs(self):\n\t\treturn {k: v * self.throughput for k, v in self.per_process_outputs.items()}", "def stage(self):\n pass", "def _preprocess(self):\n if self.gt_matrix_PCs is None:\n start = time.time()\n gt_matrix = self._process_vcf().get('gt_matrix')\n print('gt_matrix took {} secs'.format(time.time() - start))\n \n # Normalize gt_matrix by site\n gt_matrix_norm = gt_matrix - np.mean(gt_matrix, axis=1)[:, np.newaxis]\n \n # PCA\n start = time.time()\n u, s, vh = np.linalg.svd(gt_matrix_norm.T, full_matrices=False)\n print('SVD took {} secs'.format(time.time() - start))\n self.gt_matrix_PCs = -u @ np.diag(s)\n \n # Get relevant objects from result of ripser\n if self.ripser_result is None:\n start = time.time()\n print(\"Getting ripser object\")\n self.ripser_result = ripser(self.gt_matrix_PCs, coeff=2, maxdim=1, do_cocycles=True)\n print('Ripser took {} secs'.format(time.time() - start))", "def cost(self):\n cost = {}\n if len(self.nodes) == 0:\n return cost\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.cost[r] for n in self.nodes]\n estimator = AvgAggregatorEstimator(values)\n cost[r] = estimator\n return cost", "def do_workload(self):\n pass", "def num_stages(tree, order):\n p = len(order)\n stages = cstree_to_stages(tree, order)\n return sum([len(stages[i]) for i in range(1,p)])", "def __init__(self, init_size=31):\n self.keys = build_array(init_size) # Parallel arrays - key[]\n self.values = build_array(init_size) # Parallel arrays - values[]\n self.size = init_size\n self.count = 0\n # Task3 counters\n self.count_collisions = 0\n self.total_probe_length = 0\n self.count_rehashes = 0\n self.longest_probe_chain = 0", "def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step 
not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))", "def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.input_dim, f'input_dim is {p.input_dim}'\n assert p.hidden_dim, f'hidden_dim is {p.hidden_dim}'\n assert p.num_heads > 0, f'num_heads is {p.num_heads}'\n # if proj_tpl does not have dim_per_head set, set it\n if p.proj_tpl.dim_per_head == 0:\n p.proj_tpl.dim_per_head = self.dim_per_head\n\n if p.device_mesh is not None:\n assert p.weight_split_dims_mapping is not None\n assert p.activation_split_dims_mapping is not None\n\n if isinstance(p.weight_split_dims_mapping, dict):\n qkv_weight_split_dims_mapping = p.weight_split_dims_mapping['qkv']\n post_weight_split_dims_mapping = p.weight_split_dims_mapping['post']\n else:\n qkv_weight_split_dims_mapping = p.weight_split_dims_mapping\n post_weight_split_dims_mapping = p.weight_split_dims_mapping\n\n def ProjectInput(input_dim):\n return p.proj_tpl.Copy().Set(\n input_dim=input_dim,\n num_heads=p.num_heads,\n use_bias=p.use_bias,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=qkv_weight_split_dims_mapping,\n make_output_proj_no_op=False)\n\n def ProjectInputOneStep(input_dim):\n return p.proj_tpl.Copy().Set(\n input_dim=input_dim,\n num_heads=p.num_heads,\n dim_per_head=self.dim_per_head * 3,\n use_bias=p.use_bias,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=qkv_weight_split_dims_mapping,\n make_output_proj_no_op=False,\n )\n\n if isinstance(p.input_dim, dict):\n key_input_dim = p.input_dim['key']\n value_input_dim = p.input_dim['value']\n query_input_dim = p.input_dim['query']\n assert key_input_dim, f'key_input_dim is {key_input_dim}'\n assert query_input_dim, f'query_input_dim is {query_input_dim}'\n else:\n key_input_dim = p.input_dim\n value_input_dim = p.input_dim\n query_input_dim = p.input_dim\n\n if p.enable_value_proj and p.enable_qkv_proj_in_onestep:\n self.CreateChild('qkv', ProjectInputOneStep(key_input_dim))\n else:\n self.CreateChild('key', ProjectInput(key_input_dim))\n self.CreateChild('query', ProjectInput(query_input_dim))\n if p.enable_value_proj:\n assert value_input_dim, f'value_input_dim is {value_input_dim}'\n self.CreateChild('value', ProjectInput(value_input_dim))\n if p.enable_query_scale and p.enable_per_dim_scale:\n self.CreateChild(\n 'per_dim_scale',\n PerDimScaleLayer.Params().Set(dim=p.proj_tpl.dim_per_head))\n self.CreateChild('atten_dropout',\n p.dropout_tpl.Set(keep_prob=1.0 - p.atten_dropout_prob))\n # Setting is_output_projection=True to set the projection direction\n # from hidden dim to input dim. 
Output projection follows query_input_dim.\n self.CreateChild(\n 'post',\n p.proj_tpl.Copy().Set(\n input_dim=p.output_dim or query_input_dim,\n num_heads=p.num_heads,\n is_output_projection=True,\n use_bias=p.use_bias,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=post_weight_split_dims_mapping))\n\n if p.rope_tpl:\n assert issubclass(p.rope_tpl.cls, layers.RotaryPositionalEmbeddingLayer)\n rope_p = p.rope_tpl.Copy()\n if rope_p.embedding_dim == 0:\n rope_p.embedding_dim = self.dim_per_head\n self.CreateChild('rope', rope_p)\n\n if p.attn_add_memory:\n assert p.memory_tpl is not None\n self.CreateChild(\n 'lsh_mem',\n p.memory_tpl.Copy().Set(\n input_dim=self.dim_per_head,\n output_dim=self.dim_per_head,\n name='attn_lsh_mem'))\n if p.use_scale_invariant_atten:\n assert not (p.enable_scaling_code_motion or p.atten_extra_logit)", "def solve_all_stages(stages, objects_dic, predicates_rules, gstate, actionlist, problem_dic):\n\n result = {}\n result[\"visualStages\"] = []\n for stage in stages:\n\n stage_dic = {}\n object_dic_copy = copy.deepcopy(objects_dic)\n predicates = stage[\"items\"]\n sorted_predicates = priority(predicates, predicates_rules)\n\n # For hanoi problem, reset each stage\n # For logistics problem, reset each stage\n for fname in gstate[\"reset_function\"]:\n gstate[fname] = {}\n solvepredicates(sorted_predicates, object_dic_copy, predicates_rules, gstate)\n stage_dic[\"visualSprites\"] = object_dic_copy\n if \"stageName\" not in stage:\n stage_dic[\"stageName\"] = \"Inital Stage\"\n stage_dic[\"stageInfo\"] = \"No step information\"\n\n else:\n stage_dic[\"stageName\"] = stage[\"stageName\"]\n stage_dic[\"stageInfo\"] = stage[\"stageInfo\"]\n\n result[\"visualStages\"].append(stage_dic)\n\n result[\"subgoals\"] = Subgoal.get_subgoal(stages, problem_dic[1]['goal'].copy(), actionlist.copy())\n\n return result", "def graph_metrics(subjects,task,atlas,run_version,project='hcp',run=False,scrubbed=False,homedir=homedir):\n\tif run == False:\n\t\t# done_subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_%s.npy' %(project,task,atlas,run_version)) \n\t\t# assert (done_subjects == subjects).all() #make sure you are getting subjects / subjects order you wanted and ran last time.\n\t\tsubject_pcs = np.load('%sdynamic_mod/results/%s_%s_%s_pcs_%s.npy' %(homedir,project,task,atlas,run_version)) \n\t\tsubject_wmds = np.load('%sdynamic_mod/results/%s_%s_%s_wmds_%s.npy' %(homedir,project,task,atlas,run_version)) \n\t\tsubject_mods = np.load('%sdynamic_mod/results/%s_%s_%s_mods_%s.npy' %(homedir,project,task,atlas,run_version)) \n\t\ttry:\n\t\t\tsubject_communities = np.load('%sdynamic_mod/results/%s_%s_%s_coms_%s.npy' %(homedir,project,task,atlas,run_version)) \n\t\texcept:\n\t\t\tsubject_communities = np.load('%sdynamic_mod/results/%s_%s_%s_coms_fz_wc.npy' %(homedir,project,task,atlas)) \n\t\tmatrices = np.load('%sdynamic_mod/results/%s_%s_%s_matrices_%s.npy' %(homedir,project,task,atlas,run_version)) \n\t\tthresh_matrices = np.load('%sdynamic_mod/results/%s_%s_%s_z_matrices_%s.npy' %(homedir,project,task,atlas,run_version))\n\t\tfinished_subjects = np.load('%sdynamic_mod/results/%s_%s_%s_subs_%s.npy' %(homedir,project,task,atlas,run_version))\n\telif run == True:\n\t\tfinished_subjects = []\n\t\tvariables = []\n\t\tmatrices = []\n\t\tthresh_matrices = []\n\t\tfor subject in subjects:\n\t\t\ts_matrix = []\n\t\t\tif scrubbed == True:\n\t\t\t\tfiles = 
glob.glob('%sdynamic_mod/%s_matrices/%s_%s_*%s*_matrix_scrubbed_0.2.npy'%(homedir,atlas,subject,atlas,task)) # FOR SCRUBBING ONLY\n\t\t\tif scrubbed == False:\n\t\t\t\tfiles = glob.glob('%sdynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(homedir,atlas,subject,atlas,task))\n\t\t\tfor f in files:\n\t\t\t\tif scrubbed == True:\n\t\t\t\t\t# FOR SCRUBBING ONLY\n\t\t\t\t\tdis_file = run_fd(subject,'_'.join(f.split('/')[-1].split('_')[2:5]))\n\t\t\t\t\tremove_array = np.zeros(len(dis_file))\n\t\t\t\t\tfor i,fdf in enumerate(dis_file):\n\t\t\t\t\t\tif fdf > .2:\n\t\t\t\t\t\t\tremove_array[i] = 1\n\t\t\t\t\t\t\tif i == 0:\n\t\t\t\t\t\t\t\tremove_array[i+1] = 1\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tif i == len(dis_file)-1:\n\t\t\t\t\t\t\t\tremove_array[i-1] = 1\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tremove_array[i-1] = 1\n\t\t\t\t\t\t\tremove_array[i+1] = 1\n\t\t\t\t\tif len(remove_array[remove_array==1])/float(len(remove_array)) > .75:\n\t\t\t\t\t\tcontinue\n\t\t\t\tf = np.load(f)\n\t\t\t\t1/0\n\t\t\t\tnp.fill_diagonal(f,0.0)\n\t\t\t\tf[np.isnan(f)] = 0.0\n\t\t\t\tf = np.arctanh(f)\n\t\t\t\ts_matrix.append(f.copy())\n\n\t\t\tif len(s_matrix) == 0:\n\t\t\t\tcontinue\n\t\t\ts_matrix = np.nanmean(s_matrix,axis=0)\n\t\t\tvariables.append([subject,atlas,task,s_matrix.copy()])\n\t\t\tnum_nodes = s_matrix.shape[0]\n\t\t\tthresh_matrix = s_matrix.copy()\n\t\t\tthresh_matrix = scipy.stats.zscore(thresh_matrix.reshape(-1)).reshape((num_nodes,num_nodes))\n\t\t\tthresh_matrices.append(thresh_matrix.copy())\n\t\t\tmatrices.append(s_matrix.copy())\n\t\t\tfinished_subjects.append(subject)\n\t\tsubject_mods = [] #individual subject modularity values\n\t\tsubject_pcs = [] #subjects PCs\n\t\tsubject_wmds = []\n\t\tsubject_communities = []\n\t\tassert len(variables) == len(finished_subjects)\n\t\tprint 'Running Graph Theory Analyses'\n\t\tfrom multiprocessing import Pool\n\t\tpool = Pool(18)\n\t\tresults = pool.map(individual_graph_analyes_wc,variables)\t\t\n\t\tfor r,s in zip(results,finished_subjects):\n\t\t\tsubject_mods.append(np.nanmean(r[0]))\n\t\t\tsubject_pcs.append(r[1])\n\t\t\tsubject_wmds.append(r[2])\n\t\t\tsubject_communities.append(r[3])\n\t\t\tassert r[4] == s #make sure it returned the order of subjects/results correctly\n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_pcs_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_pcs))\n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_wmds_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_wmds))\n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_mods_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_mods))\n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_subs_%s.npy' %(homedir,project,task,atlas,run_version),np.array(finished_subjects))\n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_matrices_%s.npy'%(homedir,project,task,atlas,run_version),np.array(matrices))\n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_coms_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_communities)) \n\t\tnp.save('%sdynamic_mod/results/%s_%s_%s_z_matrices_%s.npy'%(homedir,project,task,atlas,run_version),np.array(thresh_matrices))\n\tsubject_mods = np.array(subject_mods)\n\tsubject_pcs = np.array(subject_pcs)\n\tsubject_wmds = np.array(subject_wmds)\n\tsubject_communities = np.array(subject_communities)\n\tmatrices = np.array(matrices)\n\tthresh_matrices = np.array(thresh_matrices)\n\tresults = {}\n\tresults['subject_pcs'] = subject_pcs\n\tresults['subject_mods'] = subject_mods\n\tresults['subject_wmds'] = 
subject_wmds\n\tresults['subject_communities'] = subject_communities\n\tresults['matrices'] = matrices\n\tdel matrices\n\tresults['z_scored_matrices'] = thresh_matrices\n\tresults['subjects'] = finished_subjects\n\tdel thresh_matrices\n\treturn results", "def num_seq_dep_stages(self):\n n_s = [0]*len(self)\n for i in range(len(self)):\n for j in range(i):\n if self.A[i,j] != 0:\n n_s[i] = max(n_s[i], n_s[j]+1)\n\n n = 0\n for i in range(len(self)):\n if self.b[i] != 0:\n n = max(n, n_s[i]+1)\n return n" ]
[ "0.57785887", "0.5777118", "0.5627112", "0.5470846", "0.5466452", "0.5450876", "0.5448035", "0.5408511", "0.53821594", "0.5357281", "0.5329908", "0.5319501", "0.5319265", "0.531267", "0.5305732", "0.5291986", "0.5286079", "0.5273258", "0.52633226", "0.52444196", "0.5243653", "0.52378446", "0.52340585", "0.5206", "0.52038884", "0.5202139", "0.5173996", "0.5157871", "0.5149345", "0.51419914", "0.51419914", "0.51202315", "0.5113212", "0.51033133", "0.510204", "0.5099352", "0.50810397", "0.5080555", "0.5070293", "0.50689733", "0.506111", "0.5057782", "0.50518566", "0.50372237", "0.50370467", "0.49956605", "0.49942973", "0.4991791", "0.49841562", "0.49782434", "0.49769115", "0.49650562", "0.4958833", "0.49565914", "0.4944516", "0.49413654", "0.49401546", "0.4932356", "0.49236742", "0.4909033", "0.4907871", "0.49071294", "0.49061397", "0.49027812", "0.4901951", "0.49006054", "0.4892084", "0.4889993", "0.48801407", "0.48759177", "0.48716313", "0.48596916", "0.48584223", "0.48525462", "0.4848783", "0.48431814", "0.48428124", "0.48318753", "0.48272547", "0.4825883", "0.480586", "0.48017916", "0.47994438", "0.47952184", "0.47936666", "0.47907716", "0.47880358", "0.47833845", "0.4783071", "0.47827715", "0.4780478", "0.47794446", "0.47751483", "0.4765272", "0.47641778", "0.4762816", "0.47591785", "0.47583532", "0.47581854", "0.475111" ]
0.52850324
17
Count the number of times elem appears in the reversed iterator.
def count(self, elem):
    return self.iter.count(elem)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sequence_sorted_count(self, x, reverse=False):\n c = 0\n if reverse: it = reversed(self)\n else: it = iter(self)\n for v in it:\n if x == v:\n c += 1\n break\n for v in it:\n if x == v: c += 1\n else: break\n return c", "def count(self):\n\n count = 0\n x = self.begin\n\n if self.begin == self.end == None:\n return 0\n\n elif self.begin == self.end:\n return 1\n\n else:\n while x:\n count += 1\n x = x.next\n\n return count", "def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count", "def count(self, elem):\n if not self.step:\n return _coconut.float(\"inf\") if elem == self.start else 0\n return int(elem in self)", "def count(self):\n node = self.head\n i = 0\n while node:\n i += 1\n node = node.next\n\n return i", "def get_count(self):\n count = 0\n temp = self.head\n while temp:\n count += 1\n temp = temp.next\n return count", "def leniter(i):\n return sum(1 for e in i)", "def __len__(self):\n if not self.head:\n return 0\n if not self.head.next:\n return 1\n N, tort, hare = 1, self.head.next, self.head.next.next\n while tort and (tort is not hare):\n N += 1\n tort = tort.next\n if hare and hare.next:\n hare = hare.next.next\n return N", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')", "def count(iterable):\n\treturn sum(1 for _ in iterable)", "def count(self, i):\n return sum([1 for j in self if i==j])", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def iter_count(self):\n return self._iter_count", "def __len__(self):\n i = 0\n for S in self.states():\n i += 1\n return i", "def count(self):\n return sum(1 for _ in self)", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def len(self):\n count = 0\n temp = self.head\n while temp.next!=None:\n count += 1\n temp = temp.next\n return(count)", "def length(self): # Class O(n)\r\n h = self.head\r\n size = 1\r\n while 'next' in dir(h.next):\r\n size += 1\r\n h = h.next\r\n return size", "def count(iterable):\n return sum(1 for _ in iterable)", "def cursor_nelements(cursor):\n\tcount = 0\n\tfor data in cursor:\n\t\tcount += 1\n\treturn count", "def get_number_of_inversions_naive(self, lst):\r\n # Running time: O(n ** 2)\r\n count_inv = 0\r\n \r\n for i in range(len(lst)):\r\n for j in range(i+1, len(lst)):\r\n if lst[i] > lst[j]:\r\n count_inv += 1\r\n \r\n return count_inv", "def count(seq):\n\treturn sum(1 for x in seq)", "def __count_inversions(puzzle):\n puzzleLength = len(puzzle)\n count = 0\n for i in range(puzzleLength):\n for j in range(i + 1, puzzleLength):\n if(puzzle[i] > puzzle[j]):\n count += 1\n return count", "def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def index(self, elem):\n return _coconut.len(self.iter) - self.iter.index(elem) - 1", "def invserion_count(board : list) -> int:\n inv_count = 0\n board_len = len(board)\n for i in range(board_len):\n for j in 
range(i+1,board_len):\n if board[i] and board[j] and board[i] >= board[j]:\n inv_count += 1\n return inv_count", "def size(self):\n traverse = self.head\n count = 0\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count + 1", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def count(self):\n return len(self._elements)", "def _getLenRecur(self, el):\n retval = 0\n if isinstance(el,list) or isinstance(el,tuple):\n retval = 1 + self._getLenRecur(el[0])\n return retval", "def size(self):\n current = self.head\n counter = 0\n while current is not None:\n counter += 1\n current = current.next\n return counter", "def __len__(self, count=0):\n return len(self.next(), count+1) if self.next() else count", "def size(self):\n traverse = self.head\n count = 1\n while traverse.next != None:\n traverse = traverse.next\n count += 1\n return count", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def length(self):\n if self.head:\n count = 1\n current = self.head\n while(current.next != self.head):\n\tcount+=1\n\tcurrent = current.next\n return count\n else:\n return 0", "def size(self):\n\n count = 0\n\n temp = self.head\n while temp is not None:\n count += 1\n temp = temp.next\n\n return count", "def size(self):\n\n size = 1\n traverse = self.front\n if self.front == None:\n return 0\n\n while traverse.next != None:\n traverse = traverse.next\n size += 1\n return size", "def size(self):\n\n size = 1\n traverse = self.front\n if self.front == None:\n return 0\n\n while traverse.next != None:\n traverse = traverse.next\n size += 1\n return size", "def count(iterable):\n return sum(1 for whatever in iterable)", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def num_of_ele(self, ele):\n return self.ele_freqs[ele]", "def count(self):\n return len(self.order_lst)", "def count(iterable: Iterable) -> int:\n return sum(1 for x in iterable)", "def get_length(self):\n pointer = self.head\n counter = 0\n while pointer:\n counter += 1\n pointer = pointer.next_node\n return counter", "def count(x):\n return sum(len(y) for y in x)", "def __len__(self):\n return self.last - self.first + 1", "def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count", "def size(self):\n count = 0\n current = self.front\n\n while current is not None:\n current = current.getPtr()\n count += 1\n\n return count", "def count(self):\n return len([i for i in self.iteritems()])", "def element_count(self):\n return self._internal.get_element_count()", "def count_element (input_list):\n counter = 0\n for dummy_i in input_list:\n counter +=1\n return counter", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def count(s, value):\n total, index = 0, 0\n while index < len(s):\n element = s[index]\n if element == value:\n total += 1\n index += 1\n return total", "def count_if(self, criteria):\n # set count to 0\n count = 0\n # iterate through nodes in deque\n for item in self:\n # if the node's data meets the criteria passed,\n if criteria(item):\n # increment count\n count += 1\n # return the count\n return count", "def count(self, item: Any) -> int:\n # If this recursive list is empty\n if self.is_empty():\n return 0\n # If there is a first and a rest.\n else:\n # Check if 
the first is equal and add the count on the rest of the list.\n return int(self._first == item) + self._rest.count(item)", "def index(self, data):\n\n traverse = self.head\n count = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return count\n traverse = traverse.next\n count += 1\n\n if traverse.data == data:\n return count", "def size(self):\n if self.empty():\n count = 0\n else:\n n = self.head\n count = 1\n while n.next is not None:\n count += 1\n n = n.next\n return count", "def __len__(self):\n i = -1\n # TODO(user):pytype: type checker doesn't treat self as iterable.\n for i, _ in enumerate(self): # pytype: disable=wrong-arg-types\n pass\n\n return i + 1", "def count(self):\r\n return self.count_helper(self.top_node)", "def generator_len(self, it):\n return len(list(it))", "def count_adjacent_repeats(s):\n\n\t# stores number of adjacent repeats found\n\trepeats = 0\n\n\tfor i in range(len(s) - 1):\n\t\t# compare the char at index i with the one after it\n\t\tif s[i] == s[i + 1]:\n\t\t\trepeats += 1\n\treturn repeats", "def length(self, head):\n count = 0\n while head:\n count += 1\n head = head.next\n return count", "def element_count(self):\n return len(self.elements) + len(self.virtual_elements)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def rev_counter(matrix, res):\n while True:\n a, b = (yield)\n res[0] += matrix[pos[a]][pos[b]]\n res[1] += 1", "def get_num_values(self, item):\n\tnum_values = 1\n\t\n\t# Valor mas antiguo de la linked list\n\t# Siempre tiene valor, si no, no tenemos la entrada en el hashset\n\tvalue = item[\"tail\"][\"next\"]\n \twhile long(value) != 0:\n\t num_values += 1\n\t value = value[\"next\"]\n\n\treturn num_values", "def count_for(s, value):\n total = 0\n for elem in s:\n if elem == value:\n total = total + 1\n return total", "def count_while(s, value):\n total, index = 0, 0\n while index < len(s):\n if s[index] == value:\n total = total + 1\n index = index + 1\n return total", "def __len__(self):\n return len(self.iterator)", "def cnt(iterable):\n return len(iterable) if iterable is not None else 0", "def __len__(self) -> int:\n # base case\n if self._first is None:\n return 0\n elif self._rest is None:\n return 1\n # recursive case\n else:\n return 1 + len(self._rest)", "def get_length(self):\n current_node = self.head\n if current_node:\n i = 1\n while current_node.next:\n current_node = current_node.next\n i += 1\n return i\n else:\n return 0", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count", "def atom_count(self):\n return len(self.repeated_elements())", "def size(self):\n current = self.__head\n count = 0\n while current:\n count += 1\n current = current.next_node\n return count", "def __len__(self):\n return len(self._previous_values[self.last_index:])", "def elements_count(self):\n return self.__elements_count", "def get_iter_num(self):\n\tif len(self.cost) > 0:\n first_key = list(self.cost.keys())[0]\n num = len(self.cost[first_key]) - 1\n\telse:\n\t first_key = list(self.prim_var.keys())[0]\n num = len(self.prim_var[first_key]) - 1\n\treturn num", "def get_num_applies(self):\n ops = 0\n for _, 
remainder, _ in self:\n ops += len(remainder)\n return ops", "def count(self, value):\n self.__validate_value(value)\n counter = 0\n for v in self.__list:\n if v == value:\n counter += 1\n return counter", "def count_fragments(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_fragments()\n return n", "def count_fragments(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_fragments()\n return n", "def counter(self) -> int:", "def counter(self) -> int:", "def count_unvisited(data):\n count = sum(n.count(\"n\") for n in data)\n return count", "def __len__(self):\n return sum(abs(j) for (i, j) in self)", "def getNumElements(self):\n return 1 + sum(m.getNumElements() for m in self.members)", "def findLengthOfLoopInList(self):\n\t\tslow = self.head\n\t\tif slow is None:\n\t\t\treturn 0\n\t\tfast = self.head\n\t\tlength = 0\n\t\twhile (fast is not None) and ((slow != fast) or (length == 0)):\n\t\t\tslow = slow.getNext()\n\t\t\tfast = fast.getNext()\n\t\t\tif fast is None:\n\t\t\t\tbreak\n\t\t\tfast = fast.getNext()\n\t\t\tlength += 1\n\n\t\tif (slow == fast) and (length > 0):\n\t\t\tslow = self.head\n\t\t\twhile slow != fast:\n\t\t\t\tslow = slow.getNext()\n\t\t\t\tfast = fast.getNext()\n\t\t\tlength = 0\n\t\t\twhile (slow != fast) or (length == 0):\n\t\t\t\tfast = fast.getNext()\n\t\t\t\tlength += 1\n\t\t\treturn length\n\t\telse:\n\t\t\treturn 0", "def __len__(self):\n # TODO: complete this function!\n if self.is_empty():\n return 0\n else:\n return 1 + len(self._rest)", "def size(self):\n count = 0\n current = self.head\n if self.head is None:\n return 0\n while current.next_node is not None:\n count += 1\n current = current.next_node\n return count + 1", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def __len__(self):\n if self.length is None:\n self.length = 0\n for _ in self:\n self.length += 1\n\n return self.length", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def countOccurrences(lst, x):\n res = 0\n for i in lst:\n if i == x:\n res += 1\n return res", "def count(pred, l):\n nl = [i for i in range(0,len(l)) if pred(l[i])]\n\n return len(nl)" ]
[ "0.710874", "0.67030925", "0.65069044", "0.6468267", "0.6360811", "0.6293502", "0.62844634", "0.6255483", "0.62217087", "0.6204283", "0.61864936", "0.6095033", "0.6027348", "0.5992894", "0.5981212", "0.59621656", "0.59607416", "0.5958107", "0.59503806", "0.593788", "0.59354264", "0.59248734", "0.59003836", "0.5895588", "0.5893935", "0.5893935", "0.58929145", "0.58881074", "0.5884157", "0.58774316", "0.5863891", "0.5860454", "0.5851347", "0.58471847", "0.57883793", "0.57847524", "0.57835406", "0.5769787", "0.576948", "0.576948", "0.576323", "0.5761551", "0.57600063", "0.5757082", "0.57537466", "0.5746538", "0.574274", "0.5742196", "0.57300776", "0.5726425", "0.57170194", "0.57046294", "0.5700126", "0.5689416", "0.5683176", "0.5682705", "0.56599087", "0.5650826", "0.56338364", "0.56246275", "0.5622487", "0.56217974", "0.562018", "0.55948037", "0.5593854", "0.55917436", "0.55917436", "0.55707866", "0.55567336", "0.5554094", "0.55519646", "0.55481285", "0.55313265", "0.55235463", "0.5519993", "0.5511891", "0.54992986", "0.5499248", "0.5494486", "0.5491945", "0.54893947", "0.5484074", "0.54833984", "0.5480567", "0.54634917", "0.54634917", "0.5456496", "0.5456496", "0.54497087", "0.5449204", "0.54487556", "0.54400426", "0.5437274", "0.54325974", "0.54318404", "0.54318404", "0.54318404", "0.54286104", "0.54265386", "0.5424939" ]
0.70186836
1
Find the index of elem in the reversed iterator.
def index(self, elem): return _coconut.len(self.iter) - self.iter.index(elem) - 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def r_index(sequence, element):\n\n for i, e in enumerate(reversed(sequence)):\n if element == e:\n return len(sequence) - 1 - i\n else:\n raise ValueError(\"r_index(sequence, element):\\\n element not in the sequence\")", "def index(self, elem):\n pointer = self.head\n i = 0\n while (pointer):\n if pointer.data == elem:\n return i\n pointer = pointer.next\n i += 1\n raise ValueError(\"{} is not in list\".format(elem))", "def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)", "def index(self, elem):\n ponteiro = self.inicio\n i = 0\n while(ponteiro):\n if ponteiro.dado == elem:\n return i\n ponteiro = ponteiro.prox\n i = i + 1\n raise ValueError(\"{} is not in list\".format(elem))", "def reverse_linear_search(lst, value):\n i = len(lst) - 1\n while i != -1 and lst[i] != value:\n i = i + 1\n if i == -1:\n return -1\n else:\n return i", "def find_index(arr, pred):\n for index, elem in enumerate(arr):\n if pred(elem):\n return index\n return -1", "def get_right_index(i):\n pos = i + 1\n right_pos = 2 * pos + 1\n right_index = right_pos - 1\n return right_index", "def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)", "def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1", "def index(self, elem):\n if elem not in self:\n raise _coconut.ValueError(_coconut.repr(elem) + \" not in \" + _coconut.repr(self))\n return (elem - self.start) // self.step if self.step else 0", "def get_element_index(el, elements):\n for idx, element in enumerate(elements):\n diff = torch.sum(torch.abs(el - element))\n if diff.item() < 1e-8:\n return idx\n return None", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def index(self, item: Any) -> int:\n index_so_far = 0\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return index_so_far\n index_so_far += 1\n curr = curr.next\n raise ValueError", "def last_index(self, item):\n return _(self.size()._ - 1 - self.reverse().index(item)._)", "def f_index(self, substring, direction=[]):\n substr = self.value(substring)\n if \"back\" in direction:\n pos = self._val.rfind(substr)\n else:\n pos = self._val.find(substr)\n\n return pos + 1", "def get_index(self, child):\n for _index, item in enumerate(self.children):\n if item == child:\n return _index\n\n return -1", "def element_index(self):\n return self._index", "def find(self, data):\n index = 0\n current = self.head\n while current:\n if current.data == data:\n return index\n index += 1\n current = current.next\n\n return -1", "def indexMatching(seq, condition):\n for i,x in enumerate(seq):\n if condition(x):\n return i\n return -1", "def search_for_nums(data):\n index = None\n for i in range(len(data)-1,0, -1): #count backwards through the loop\n if data[i] != None: #found most recent input\n print(\"index found...data: %s\" % (data[i]))\n return i\n #END IF\n #END FOR\n return index", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def index(self, item):\n \"\"\"\n :type item: Node\n :rtype int\n \"\"\"\n curr = self.head\n idx = 0\n while curr:\n if item == curr.getData():\n break\n idx += 1\n curr = 
curr.getNext()\n return idx", "def edge_index(indexed_triangle, edge):\n for i in range(3):\n triangle_edge = indexed_triangle[(i + 1) % 3], indexed_triangle[(i + 2) % 3]\n if triangle_edge == edge:\n return i\n triangle_edge = triangle_edge[1], triangle_edge[0]\n if triangle_edge == edge:\n return i\n # Edge not found in triangle\n assert False", "def find_index(numbers, element):\n index = 0\n for item in numbers:\n if element != item:\n index += 1\n elif element == item:\n return index", "def _find_position(self, element):\n walk = self._data.first()\n while walk is not None and walk.element()._value != element:\n walk = self._data.after(walk)\n return walk", "def index(self, item):\n\t\ti = 0\t\t\n\t\tif not self.len:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\treturn i\n\t\tactual = self.prim\n\t\twhile actual and actual.dato != item:\n\t\t\tactual = actual.prox\n\t\t\ti += 1\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\treturn i", "def index(self, data):\n\n traverse = self.head\n index = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return index\n traverse = traverse.next\n index += 1\n\n if traverse.data == data:\n return index", "def find_index(draw, urn):\n i = 0\n while draw >= 0:\n draw -= urn[i]\n i+=1\n return i-1", "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def index(self, pos):\n for i, n in enumerate(self):\n if i == pos: return n\n raise Exception('Index out of bounds.')", "def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i", "def index_equals_value_search1(arr):\n for key, value in enumerate(arr):\n if value == key:\n return value\n return -1", "def last_index_of(my_list, my_value):\n return len(my_list) - my_list[::-1].index(my_value)", "def FindPositonOfElement(self, element):\r\n for i in range(len(self.__listOfElements)):\r\n existingElement = self.__listOfElements[i]\r\n if (existingElement == element):\r\n return i\r\n raise RepositoryError(\"Inexisting Element\")", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def index(self, item):\n \n pos = 0\n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return pos\n else:\n current = current.get_next()\n pos += 1\n \n raise ValueError('{} is not in list'.format(item))", "def getIndex(self, child):\n \n if child in self._children:\n return self._children.index(child)\n else:\n return -1", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def index_where(iterable, pred):\n # type: (Iterable[T], Callable[[T], bool]) -> Optional[int]\n for i, el in enumerate(iterable):\n if pred(el):\n return i\n return None", "def right_child_idx(idx):\n return (idx + 1) << 1", "def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index", "def search(A, 
v):\r\n\tfor i in range(0, len(A)):\r\n\t\tif A[i] == v:\r\n\t\t\treturn i", "def _find_nav_index(self, needle):\r\n def matches(needle, nav):\r\n if type(needle) is str and type(nav) is str:\r\n return needle == nav\r\n elif type(needle) is tuple and type(nav) is tuple:\r\n return needle[0] == nav[0]\r\n return False\r\n\r\n idx = 0\r\n for nav in self._navigation:\r\n if matches(needle, nav):\r\n return idx\r\n idx += 1\r\n return None", "def indexOf(Paire,element) :\n index = -1\n if(Paire[1][0]==element):\n index = 0\n elif(Paire[1][1]==element):\n index = 1\n else :\n index = 2\n return(index)", "def i (self):\n\n return self.end - 1", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def search_entry_equal_to_its_index(A):\n L, R = 0, len(A) -1\n\n while L <= R:\n M = L + (R - L) // 2\n if A[M] > M:\n R = M - 1\n elif A[M] == M:\n return M\n else:\n # A[M] < M\n L = M + 1\n\n return -1", "def binary_search(elem: int, arr: Sequence) -> Optional[int]:\n if not arr:\n return None\n f_ind = 0\n l_ind = len(arr) - 1\n\n mid_ind = len(arr) // 2\n while arr[f_ind] != elem and f_ind <= l_ind:\n if elem > arr[mid_ind]:\n f_ind = mid_ind + 1\n else:\n l_ind = mid_ind - 1\n mid_ind = (f_ind + l_ind) // 2\n if arr[f_ind] == elem:\n return f_ind\n else:\n return None", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def sublist_index(haystack, needle):\n try:\n for i in xrange(len(haystack)):\n if haystack[i:i+len(needle)] == needle:\n return i\n except IndexError:\n pass\n raise ValueError", "def _find_position(self, e):\n walk = self._data.first()\n while walk is not None and walk.element()._value != e:\n walk = self._data.after(walk)\n \n return walk", "def get_index(line):\n for dummy_i in range(0,len(line) - 1):\n if line[dummy_i] !=0 and line[dummy_i] == line[dummy_i+1]:\n return dummy_i", "def Find_the_last_index(A, target):\n if len(A) == 0:\n return -1\n begin = 0\n end = len(A) - 1\n while end - begin > 1:\n mid = begin + (end - begin >> 1)\n if target < A[mid]:\n end = mid\n else:\n begin = mid\n if A[end] == target:\n return end\n elif A[begin] == target:\n return begin\n else:\n return -1", "def right(self, index):\n try:\n if index == self.root_index():\n index = self.adjacency_list[index][1]\n else:\n index = self.adjacency_list[index][2]\n return index\n except IndexError:\n return -1", "def __find_node_index(self, index):\n cur_index = 0\n cur_node = self.head\n prev_node = None\n while cur_node is not None:\n if index >= len(cur_node.data_list) + cur_index:\n cur_index += len(cur_node.data_list)\n prev_node = cur_node\n cur_node = cur_node.next_node\n else:\n index -= cur_index\n break\n return index, prev_node, cur_node", "def _get_lback_index(self, model, last) -> int:\n assert last > 0\n # last state cannot be loop-back.\n assert model.get_value(self.totime(self._in_loop, last)).is_true()\n assert model.get_value(self.totime(self._in_loop, 0)).is_false()\n idx = last - 1\n while model.get_value(self.totime(self._in_loop, idx)).is_true():\n idx -= 1\n assert idx >= 0\n assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true()\n assert model.get_value(self.totime(self._in_loop, 
idx)).is_false()\n assert model.get_value(self.totime(self.start_loop, idx)).is_true()\n return idx", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1", "def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None", "def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"", "def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None", "def binarysearch(a, i):\n l = 0\n r = len(a) - 1\n \n while l < r:\n m = l + (r-l)/2\n \n if a[m] == i:\n return m\n elif a[m] < i:\n l = m + 1\n else:\n r = m\n \n if l > 0 and abs(i - a[l-1]) < abs(i- a[l]):\n return l-1\n \n return l", "def linear_search(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n \n return -1", "def index(self, element, start=0, end=None):\n if end is None: # Only bound end if value has been provided\n end = self._length\n if end < 0: # For negative indexing, convert to positive counterpart\n end = self._convert_negative_index(end)\n\n start = min(self._length, max(0, start)) # Place start in bounds if extreme\n end = min(self._length, max(0, end)) # Place end in bounds if extreme\n for i in range(start, end): # Search for element within bounds\n if self._arr[i] == element:\n return i\n raise ValueError(f'{element} not found in array') # Raise if element not found", "def offset(self, needle):\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n\n idx = self._seq.index(needle)\n if self._seq[idx+len(needle):].find(needle) != -1:\n raise ValueError(\"Multiple occurances found!\")\n\n return idx", "def _search(listing, absolute_idx):\n if not listing:\n return 0\n if len(listing) == 1:\n return 0 if absolute_idx <= listing[0] else 1\n\n for idx, line_break_idx in enumerate(listing):\n if line_break_idx >= absolute_idx:\n return idx", "def get_index_by_id(self, id):\r\n for i in range(len(self.vertices)):\r\n if self.vertices[i].id == id:\r\n return i\r\n raise ValueError('Reverse look up of id failed.')", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def linear_search(alist, key):\n for i in range(len(alist)):\n if alist[i] == key:\n return i\n return -1", "def recursive_index(needle, haystack):\n\n def _recursive_index(needle, haystack, idx):\n\n if idx == len(haystack):\n return None\n\n if haystack[idx] == needle:\n return idx\n\n return _recursive_index(needle, haystack, idx + 1)\n\n return _recursive_index(needle, haystack, 0)", "def findRepIdx(self, rep, cell = 1):\n\n match = self.findRep(rep = rep, cell = cell)\n return np.arange(self.atoms.shape[0])[match]", "def array_search(haystack, needle):\n length = len(haystack)\n for i in range(length):\n if haystack[i] == needle:\n return i\n return -1", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def 
get_index(corners, i, jk):\n if type(jk) != list:\n jk = list(jk)\n assert corners.shape[1] == 3\n sol = np.where(np.bitwise_or(np.all(corners == [i] + jk, axis=1), \n np.all(corners == [i] + jk[::-1], axis=1)))[0]\n if len(sol) > 0: \n return sol[0]", "def get_curpos(self):\n for i in range(len(self.tree)):\n if self.path == self.tree[i][2]:\n return i\n else:\n return -1", "def stop(self):\n try:\n return self.index[-1]\n except:\n pass", "def get_reverse_position(self):\n return self._reverse_position", "def index(self, item):\n \n pos = 0\n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return pos\n # Early stop\n elif current.get_data() > item:\n raise ValueError('{} is not in list'.format(item))\n else:\n current = current.get_next()\n pos += 1\n \n raise ValueError('{} is not in list'.format(item))", "def findAt(self, index):\n saveCursor = self.cursor\n i = 0\n if self.first():\n if index == i:\n return self.cursor\n else:\n while self.next():\n i += 1\n if index == i:\n return self.cursor\n self.cursor = saveCursor\n return None", "def index(self, item: T) -> int:\n current = self.head\n index = 0\n while current is not None and current.item != item:\n current = current.link\n index += 1\n if current is None:\n raise ValueError(\"Item is not in list\")\n else:\n return index", "def _find_index(self, index, iimin=None, iimax=None):\n if iimin is None:\n aa = 0\n else:\n aa = iimin\n\n if iimax is not None:\n bb = iimax\n else:\n bb = len(self.index)-1\n \n # Check to see if the index is even in the range\n if bb < aa:\n return (False, aa)\n elif index <= self.index[aa]:\n return (index == self.index[aa], aa)\n elif index == self.index[bb]:\n return (True, bb)\n elif index > self.index[bb]:\n return (False, bb+1)\n \n # the value definitely lies inside the list, and it is neither aa\n # nor bb.\n while bb-aa>1:\n ii = (aa+bb)//2\n # Eventually, we'll hit the value\n if index == self.index[ii]:\n return (True, ii) \n elif index < self.index[ii]:\n bb = ii\n else:\n aa = ii\n # Unless the value isn't in the list.\n return (False, bb)", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def find_start(lines, start_str, reverse=True):\n start = -1\n # Iterate backwards until the last value is found\n if reverse:\n for i, line in reversed(list(enumerate(lines))):\n if start_str == line:\n return i\n else:\n for i, line in enumerate(lines):\n if start_str == line:\n return i\n return start", "def last_index(list_, value):\n\n found = None\n for index, val in enumerate(list_):\n if val == value:\n found = index\n if found is None:\n raise ValueError(\"{} is not in list {}\".format(value, list_))\n return found", "def point_in_arr(arr, point):\n for i in range(len(arr)):\n if arr[i][0] == point[0] and arr[i][1] == point[1]:\n return i\n return -1", "def find_idx(array, value):\n\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (\n idx == len(array)\n or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])\n ):\n return idx - 1\n else:\n return idx", "def find_indices(li, first_elt, second_elt):\r\n index1, index2 = li.index(first_elt), li.index(second_elt)\r\n if index1 == index2:\r\n index2 = index1 + 1 + li[index1+1:].index(second_elt)\r\n if index1 > index2:\r\n index1, index2 = index2, index1\r\n return (index1+1, index2+1)", "def get(self, index: int) -> int:\n if index < 0 or index >= self.size: return -1\n 
\n # choose search from head or tail\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else: # from tail\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val", "def set_indx(self, item):\n for i, s in enumerate(self._data):\n if item in s:\n return i\n return None", "def safe_index(l, e):\n try:\n return l.index(e)\n except:\n return len(l)", "def get_right_child_index(self):\n return (2 * self.index) + 2", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1", "def linear_search(arr: IntList, query: int) -> int:\n arr_len: int = len(arr)\n for idx in range(arr_len):\n if arr[idx] == query:\n return idx\n return -1", "def _first_index_with_smaller_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] <= P[i]:\n i -= 1\n return i", "def index(self):\n a = self.array_form\n\n return sum([j for j in xrange(len(a) - 1) if a[j] > a[j+1]])", "def _index(self) -> int:\n return -1", "def index(self, value, start=0, stop=-1):\n return self.elem.index(value, start, stop)" ]
[ "0.7337428", "0.7109907", "0.67639583", "0.6708626", "0.66251403", "0.6604515", "0.6553324", "0.65205306", "0.6481329", "0.6373556", "0.6335034", "0.6285708", "0.6278031", "0.6270351", "0.62547594", "0.6241182", "0.62061155", "0.6160364", "0.6146057", "0.61259276", "0.61254555", "0.61192966", "0.61157286", "0.610275", "0.61026233", "0.6048159", "0.6036537", "0.60223585", "0.5985957", "0.59512293", "0.5945609", "0.59365755", "0.59242463", "0.5923426", "0.5922246", "0.59207165", "0.5916585", "0.58940876", "0.58914506", "0.5881281", "0.5881052", "0.5861564", "0.5858105", "0.58334476", "0.58298594", "0.581331", "0.581331", "0.5810513", "0.5810165", "0.5806342", "0.57949835", "0.5788457", "0.57846993", "0.5783096", "0.57770425", "0.57696915", "0.5758961", "0.57554686", "0.57554686", "0.5750904", "0.5744651", "0.57395643", "0.57339174", "0.5733683", "0.57302547", "0.572482", "0.57204646", "0.5714387", "0.5701911", "0.5692887", "0.56884336", "0.5685814", "0.5678058", "0.5666088", "0.56630206", "0.56480527", "0.56414324", "0.5628798", "0.5626049", "0.56160504", "0.56117153", "0.5611371", "0.5610182", "0.56067955", "0.55857575", "0.5577897", "0.5576743", "0.55764747", "0.5576284", "0.5573275", "0.5570665", "0.5567858", "0.55677456", "0.5567209", "0.55661255", "0.5563859", "0.5563605", "0.55587995", "0.5553755", "0.55497944" ]
0.7336818
1
Count the number of times elem appears in the count.
def count(self, elem):
    if not self.step:
        return _coconut.float("inf") if elem == self.start else 0
    return int(elem in self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self, elem):\n return self.iter.count(elem)", "def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def num_of_ele(self, ele):\n return self.ele_freqs[ele]", "def count(self):\n return sum(1 for _ in self)", "def count(self):\n return len(self._elements)", "def count_element (input_list):\n counter = 0\n for dummy_i in input_list:\n counter +=1\n return counter", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def count(x):\n return sum(len(y) for y in x)", "def count(iterable):\n\treturn sum(1 for _ in iterable)", "def element_count(self):\n return self._internal.get_element_count()", "def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur", "def count():", "def count(self, value):\n self.__validate_value(value)\n counter = 0\n for v in self.__list:\n if v == value:\n counter += 1\n return counter", "def count(iterable):\n return sum(1 for _ in iterable)", "def count(self, i):\n return sum([1 for j in self if i==j])", "def count(iterable: Iterable) -> int:\n return sum(1 for x in iterable)", "def getCount(self, event):\n # Attempt 2: Still too slow\n count = 0\n \n for mEvent in self:\n if event.__st__(mEvent):\n count += 1\n \n return count\n \n # Attempt 1: Too slow\n #return reduce((lambda x, y: x+y),\n # map((lambda i: itemset <= i), self))", "def countOccurrences(lst, x):\n res = 0\n for i in lst:\n if i == x:\n res += 1\n return res", "def count_occurrences(x):\r\n tmp_x = sorted(copy(x))\r\n ux = unique(x)\r\n return searchsorted(tmp_x, ux, 'right') - searchsorted(tmp_x, ux, 'left')", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def count_elements(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def countX(lst, x):\n return lst.count(x)", "def countX(lst, x):\n return lst.count(x)", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def atom_count(self):\n return len(self.repeated_elements())", "def count(self):\n return len([i for i in self.iteritems()])", "def get_element_count(data, element):\n task_count = 0\n while True:\n try:\n data[element+str(task_count)]\n task_count += 1\n except:\n break\n return task_count", "def count(s, value):\n total, index = 0, 0\n while index < len(s):\n element = s[index]\n if element == value:\n total += 1\n index += 1\n return total", "def count(self):\n # TODO not implemented yet\n return 0", "def count(seq):\n\treturn sum(1 for x in seq)", "def count(self,value = 1):\n n = 0\n for s in self.sample:\n if s == value:\n n += 1\n return n", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def count_for(s, value):\n total = 0\n for elem in s:\n if elem == value:\n total = total + 1\n return total", "def how_many(e, x):\n return 
count(np.asarray(x) == e)", "def elements_count(self):\n return self.__elements_count", "def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def test_element_count(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.element_count(2,\"F\"),6)", "def count(iterable):\n return sum(1 for whatever in iterable)", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def count() -> int:\n pass", "def _count_chars(self, element):\n if isinstance(element, list):\n with_counts = [self._count_chars(c)for c in element]\n # when counting we add a space for joining\n return with_counts, sum(n + 1 for c, n in with_counts) - 1\n else:\n return element, len(element)", "def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)", "def map_count_list(x,element) -> int:\n x_list=ast.literal_eval(str(x))\n value_count=0\n for item in x_list:\n if item[0]==element:\n value_count=item[1]\n return value_count", "def count(self, e):\n try:\n return self.vals[e]\n except:\n return 0", "def count(self):\n\n raise NotImplementedError", "def count(self):\r\n return self.count_helper(self.top_node)", "def count(self):\n return len(self.find())", "def count(self, character):\n c_list = [i for i in self.input_words]\n n = sum([True for char in c_list if char == character])\n return n", "def count(self, pset):\n return self._sets.count(pset)", "def count(array, value):\n count = 0\n for i in range (len(array)):\n if (array[i] == value):\n count += 1\n return count", "def solution_2(arr):\n total = 0\n for group in arr:\n group_list = []\n for person in group:\n group_list = group_list + person\n group_table = Counter(''.join(group_list))\n for k, v in group_table.items():\n if v == len(group):\n total += 1\n return total", "def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))", "def countElements(self, nums):\n import sys\n max_n = -sys.maxint\n min_n = sys.maxint\n\n for n in nums:\n max_n = max(n, max_n)\n min_n = min(n, min_n)\n\n count = 0\n for n in nums:\n if min_n < n < max_n:\n count += 1\n return count", "def element_count(self):\r\n result = conf.lib.clang_getNumElements(self)\r\n if result < 0:\r\n raise Exception('Type does not have elements.')\r\n\r\n return result", "def count_if(self, criteria):\n # set count to 0\n count = 0\n # iterate through nodes in deque\n for item in self:\n # if the node's data meets the criteria passed,\n if criteria(item):\n # increment count\n count += 1\n # return the count\n return count", "def count(self, value: object) -> int:\n count = 0\n for _ in range(self.da.length()):\n if self.da[_] == value:\n count += 1\n return count", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def counts(self):\n return sum(self.counter.values()), len(self.visited)", "def count(self):\n\n count = 0\n x = self.begin\n\n if self.begin == self.end == None:\n return 0\n\n elif self.begin == self.end:\n return 1\n\n else:\n while x:\n count += 1\n x = x.next\n\n return count", "def count(self, word):\n pass", "def occurence_(L,x):\r\n s=0\r\n for i in range(len(L)):\r\n if L[i]==x:\r\n 
s+=1\r\n return(s)", "def dcount(ev):\n profData = getProfilingData(ev)\n if profData is not None:\n a = profData.Descendants().AsArray()\n if len(a) > 0:\n return profData.DescendantCount(a[0])\n return \"\"", "def count(self, sub) -> int:\n pass", "def count(item):\n return len(item)", "def count(some_list):\n return len(some_list)", "def getNumElements(self):\n return 1 + sum(m.getNumElements() for m in self.members)", "def count(self, chromosome):\n return self.chromosome_list.count(to_chromosome(chromosome))", "def count(self) -> int:\n return self.__count", "def count(seq, predicate):\n count = 0\n for item in seq:\n if predicate(item):\n count += 1\n return count", "def element_count(self):\n return len(self.elements) + len(self.virtual_elements)", "def GetNumberOfElements(self, assoc):\n result = 0\n for dataset in self:\n result += dataset.GetNumberOfElements(assoc)\n return int(result)", "def count_anyone_answered(group: list) -> int:\n return len(functools.reduce(lambda a, b : a + b, [collections.Counter(answers) for answers in group]))", "def count(self, tokens):\n return self._count.get(tuple(tokens), 0)", "def freq(self) -> int:", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self, value):\n # Note: objects are never coerced into other types for comparison\n if type(value).__eq__ in _int__eq__s:\n return int(self._contains_int(value))\n # take the slow path, compare every single item\n return sum(1 for self_item in self if self_item == value)", "def frequency_counter(count):\n return counts(count)", "def Count(self, limit=None):\n if limit is None:\n count = 0\n for i in self.Run():\n count += 1\n return count\n else:\n return len(self.Get(limit))", "def totalOccurrences(word, words):\n totalCount = 0\n if word in words:\n for item in words[word]:\n totalCount += item.count\n return totalCount", "def count(self):\n return Library.functions.count(self._book)", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total", "def count(self, tokens):\n return self.counts[tokens]", "def count(self, tokens):\n return self._count[tuple(tokens)]", "def count_everyone_answered(group: list) -> int:\n return len(functools.reduce(lambda a, b : a & b, [collections.Counter(answers) for answers in group]))", "def count(self, value: object) -> int:\n # Initializes count to zero.\n count = 0\n # Loops through the indices of the dynamic array and if the value is found, the count increments.\n end = self.size()\n for ind in range(end):\n if self.da[ind] == value:\n count += 1\n # Count is returned.\n return count", "def count_element(A: List[int], x: int) -> int:\n min_index = find_min_index(A, x)\n max_index = find_max_index(A, x)\n\n return (max_index-min_index)+1", "def count(self, item):\n return _(self._.count(item))", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def count(self, item: Any) -> int:\n # If this recursive list is empty\n if self.is_empty():\n return 0\n # If there is a first and a rest.\n else:\n # Check if the first is equal and add the count on the rest of the list.\n return int(self._first == item) + self._rest.count(item)", "def count(self):\n \n return self._count" ]
[ "0.8477194", "0.81752723", "0.71697414", "0.71684057", "0.71658355", "0.712942", "0.7125302", "0.7106333", "0.70579606", "0.70207214", "0.69894207", "0.69779223", "0.69651765", "0.69367886", "0.6842948", "0.6840524", "0.6828532", "0.6813907", "0.6807838", "0.68026924", "0.67927694", "0.67927694", "0.6765389", "0.6765389", "0.6765389", "0.6765389", "0.675744", "0.675744", "0.67302835", "0.67195207", "0.67069936", "0.66958797", "0.669321", "0.6683716", "0.66791165", "0.6667084", "0.665079", "0.66416377", "0.66300374", "0.6623397", "0.6616168", "0.6605246", "0.6595268", "0.65748316", "0.6566576", "0.6548579", "0.65434176", "0.6537555", "0.651782", "0.65091467", "0.6504008", "0.64859307", "0.64812446", "0.64786965", "0.64757586", "0.64591753", "0.64531046", "0.6443554", "0.6442847", "0.64410824", "0.6432255", "0.64287275", "0.64079815", "0.6401955", "0.6397149", "0.6397149", "0.63910186", "0.6377287", "0.6359455", "0.634821", "0.63404745", "0.6334498", "0.6325185", "0.63197654", "0.6308918", "0.6287013", "0.62827563", "0.62789136", "0.6272434", "0.62693065", "0.626414", "0.62600195", "0.6253353", "0.6253353", "0.6253353", "0.6247719", "0.6247541", "0.6245736", "0.62419146", "0.6217174", "0.62059134", "0.6204197", "0.61971533", "0.61921823", "0.6183487", "0.6172053", "0.61641896", "0.6161847", "0.6159462", "0.615661" ]
0.7615898
2
Find the index of elem in the count.
def index(self, elem):
    if elem not in self:
        raise _coconut.ValueError(_coconut.repr(elem) + " not in " + _coconut.repr(self))
    return (elem - self.start) // self.step if self.step else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index(self, elem):\n pointer = self.head\n i = 0\n while (pointer):\n if pointer.data == elem:\n return i\n pointer = pointer.next\n i += 1\n raise ValueError(\"{} is not in list\".format(elem))", "def index(self, elem):\n ponteiro = self.inicio\n i = 0\n while(ponteiro):\n if ponteiro.dado == elem:\n return i\n ponteiro = ponteiro.prox\n i = i + 1\n raise ValueError(\"{} is not in list\".format(elem))", "def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1", "def _get_index_of_nth_occurrence(input_list: list[Any],\n *,\n element: Any,\n count: int,\n ) -> int:\n return tuple(index for index, item in enumerate(input_list)\n if item == element)[count]", "def index(self, elem):\n return _coconut.len(self.iter) - self.iter.index(elem) - 1", "def find_index(arr, pred):\n for index, elem in enumerate(arr):\n if pred(elem):\n return index\n return -1", "def find_index(numbers, element):\n index = 0\n for item in numbers:\n if element != item:\n index += 1\n elif element == item:\n return index", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def FindPositonOfElement(self, element):\r\n for i in range(len(self.__listOfElements)):\r\n existingElement = self.__listOfElements[i]\r\n if (existingElement == element):\r\n return i\r\n raise RepositoryError(\"Inexisting Element\")", "def get_element_index(el, elements):\n for idx, element in enumerate(elements):\n diff = torch.sum(torch.abs(el - element))\n if diff.item() < 1e-8:\n return idx\n return None", "def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def indexOf(Paire,element) :\n index = -1\n if(Paire[1][0]==element):\n index = 0\n elif(Paire[1][1]==element):\n index = 1\n else :\n index = 2\n return(index)", "def search(A, v):\r\n\tfor i in range(0, len(A)):\r\n\t\tif A[i] == v:\r\n\t\t\treturn i", "def element_index(self):\n return self._index", "def index(self, pos):\n for i, n in enumerate(self):\n if i == pos: return n\n raise Exception('Index out of bounds.')", "def index(self, item):\n\t\ti = 0\t\t\n\t\tif not self.len:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\treturn i\n\t\tactual = self.prim\n\t\twhile actual and actual.dato != item:\n\t\t\tactual = actual.prox\n\t\t\ti += 1\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\treturn i", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def _find0(self):\n for index in range(0, self.size):\n if self.elements[index] == 0:\n return index\n return self.size", "def index_equals_value_search1(arr):\n for key, value in enumerate(arr):\n if value == key:\n return value\n return -1", "def get_tag_index(tags, tag_to_search):\n counter = 0\n for t in tags:\n if tag_to_search == t:\n break\n else:\n counter+=1\n return counter", "def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if 
start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"", "def index(self, item: Any) -> int:\n index_so_far = 0\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return index_so_far\n index_so_far += 1\n curr = curr.next\n raise ValueError", "def linear_search(self, num_lst, key):\r\n # Running time: O(n)\r\n for i in range(len(num_lst)):\r\n if num_lst[i] == key:\r\n return i\r\n \r\n return -1", "def findRepIdx(self, rep, cell = 1):\n\n match = self.findRep(rep = rep, cell = cell)\n return np.arange(self.atoms.shape[0])[match]", "def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None", "def _search(listing, absolute_idx):\n if not listing:\n return 0\n if len(listing) == 1:\n return 0 if absolute_idx <= listing[0] else 1\n\n for idx, line_break_idx in enumerate(listing):\n if line_break_idx >= absolute_idx:\n return idx", "def find(self, data):\n index = 0\n current = self.head\n while current:\n if current.data == data:\n return index\n index += 1\n current = current.next\n\n return -1", "def get_index(self, child):\n for _index, item in enumerate(self.children):\n if item == child:\n return _index\n\n return -1", "def point_in_arr(arr, point):\n for i in range(len(arr)):\n if arr[i][0] == point[0] and arr[i][1] == point[1]:\n return i\n return -1", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def set_indx(self, item):\n for i, s in enumerate(self._data):\n if item in s:\n return i\n return None", "def linear_search(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n \n return -1", "def __find_index(arr, val):\n if val is not None:\n return numpy.searchsorted(arr, val)\n else:\n return val", "def linear_search(arr: IntList, query: int) -> int:\n arr_len: int = len(arr)\n for idx in range(arr_len):\n if arr[idx] == query:\n return idx\n return -1", "def indexMatching(seq, condition):\n for i,x in enumerate(seq):\n if condition(x):\n return i\n return -1", "def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1", "def count(self, elem):\n if not self.step:\n return _coconut.float(\"inf\") if elem == self.start else 0\n return int(elem in self)", "def findPlace(s, lst):\n counter = 0\n for i in lst:\n if i == s:\n return counter\n else:\n counter += 1", "def index(a, x):\n i = bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n raise ValueError", "def index(self, item):\n \"\"\"\n :type item: Node\n :rtype int\n \"\"\"\n curr = self.head\n idx = 0\n while curr:\n if item == curr.getData():\n break\n idx += 1\n curr = curr.getNext()\n return idx", "def find_position(self, val):\n edges = np.array(self.cell_edges)\n if val in edges:\n index = np.searchsorted(edges, val)\n return index, index\n else:\n edges -= val\n if edges[0] > 0:\n return -1, 0\n if edges[-1] < 0:\n return 0, -1\n index = 0\n for i, e in enumerate(edges):\n if e > 0:\n index = i\n break\n return index - 1, index", "def offset(self, needle):\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n\n idx = self._seq.index(needle)\n if 
self._seq[idx+len(needle):].find(needle) != -1:\n raise ValueError(\"Multiple occurances found!\")\n\n return idx", "def main_cc(self):\n if self.E > 0:\n cc = self.cc()\n pop = np.array([np.sum(cc == k) for k in np.unique(cc)])\n idx = np.nonzero(cc == pop.argmax())[0]\n else:\n idx = 0\n return idx", "def _findPosition(self, key):\n for i in range(len(self._entryList)):\n if self._entryList[i].key == key:\n return i\n return None", "def findex(array, value):\n i = bisect.bisect_left(array, value)\n if i != len(array) and array[i] == value:\n #print(\"\\n\\n\\n FOUND!!! \\n\\n\\n\")\n return i\n \n return -1", "def index(self, element, start=0, end=None):\n if end is None: # Only bound end if value has been provided\n end = self._length\n if end < 0: # For negative indexing, convert to positive counterpart\n end = self._convert_negative_index(end)\n\n start = min(self._length, max(0, start)) # Place start in bounds if extreme\n end = min(self._length, max(0, end)) # Place end in bounds if extreme\n for i in range(start, end): # Search for element within bounds\n if self._arr[i] == element:\n return i\n raise ValueError(f'{element} not found in array') # Raise if element not found", "def safe_index(l, e):\n try:\n return l.index(e)\n except:\n return len(l)", "def getOccurence(self) -> int:\n ...", "def index(self, data):\n\n traverse = self.head\n index = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return index\n traverse = traverse.next\n index += 1\n\n if traverse.data == data:\n return index", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def _index(self) -> int:\n return -1", "def linear_search(alist, key):\n for i in range(len(alist)):\n if alist[i] == key:\n return i\n return -1", "def research_index(self,matrix,array_to_find):\n # type: (Array,Array) -> int\n for i,element in enumerate(matrix):\n if not (element-array_to_find).any():\n return i\n raise ValueError(\"Could not find array in the matrix\")", "def _get_first_occurrence_for(iterable, wanted_object):\n for i, value in enumerate(iterable):\n if value is wanted_object:\n return i", "def get_index(self):\r\n i = 0\r\n for container in self.settings[\"containers\"]:\r\n if container[\"name\"] == self.container[\"name\"]:\r\n return i\r\n i += 1", "def index(self, data):\n\n traverse = self.head\n count = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return count\n traverse = traverse.next\n count += 1\n\n if traverse.data == data:\n return count", "def find(self, list, key, value):\n for i, dic in enumerate(list):\n if dic[key] == value:\n return i\n return -1", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1", "def min_search(arr: Sequence) -> int:\n\tprint(arr)\n\tmin_index = None\n\tmin_elem = arr[0]\n\tfor i in range(1, len(arr)):\n\t\tif min_elem > arr[i]:\n\t\t\tmin_elem = arr[i]\n\t\t\tmin_index = i\n\treturn min_index", "def index(self, atom):\n return self.atom_list.index(atom)", "def index(self, item):\n \n pos = 
0\n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return pos\n else:\n current = current.get_next()\n pos += 1\n \n raise ValueError('{} is not in list'.format(item))", "def index(self, value):\n self.__validate_value(value)\n for index, v in enumerate(self.__list):\n if v == value:\n return index", "def search_entry_equal_to_its_index(A):\n L, R = 0, len(A) -1\n\n while L <= R:\n M = L + (R - L) // 2\n if A[M] > M:\n R = M - 1\n elif A[M] == M:\n return M\n else:\n # A[M] < M\n L = M + 1\n\n return -1", "def findidx(X, v, tol=1e-3):\n\tloc = -1\n\tdiff = 1e15 # Take a big difference\n\tn = len(X)\n\n\tfor i in xrange(n):\n\t\tndiff = abs(X[i]-v)\n\t\tif ndiff <= tol and ndiff < diff:\n\t\t\tloc = i\n\t\t\tdiff = ndiff\n\t\n\treturn loc", "def getindex(self):\n for index in range(1, len(self.quotes_list)):\n if str(index) not in self.quotes_list.keys():\n return index\n return len(self.quotes_list) + 1", "def array_search(haystack, needle):\n length = len(haystack)\n for i in range(length):\n if haystack[i] == needle:\n return i\n return -1", "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def sublist_index(haystack, needle):\n try:\n for i in xrange(len(haystack)):\n if haystack[i:i+len(needle)] == needle:\n return i\n except IndexError:\n pass\n raise ValueError", "def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None", "def find_in_column(self, column, value):\n values = [cell[column - 1] for cell in self._op.values]\n try:\n return values.index(value) + 1\n except ValueError:\n return 0", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def binary_search(elem: int, arr: Sequence) -> Optional[int]:\n if not arr:\n return None\n f_ind = 0\n l_ind = len(arr) - 1\n\n mid_ind = len(arr) // 2\n while arr[f_ind] != elem and f_ind <= l_ind:\n if elem > arr[mid_ind]:\n f_ind = mid_ind + 1\n else:\n l_ind = mid_ind - 1\n mid_ind = (f_ind + l_ind) // 2\n if arr[f_ind] == elem:\n return f_ind\n else:\n return None", "def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)", "def find(f, seq):\n\tfor num,item in enumerate(seq):\n\t\tif f(item): return num\n\treturn -1", "def find_first(item, vec):\n for i in range(len(vec)):\n if item == vec[i]:\n return i\n return -1", "def _find_position(self, element):\n walk = self._data.first()\n while walk is not None and walk.element()._value != element:\n walk = self._data.after(walk)\n return walk", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def containing_slot(self, point):\n for i, bounds in enumerate(self._slot_bounds):\n if bounds.contains_point(point):\n return i + 1\n\n return None", "def idx(self):\n return self._idx", "def getIndex(self, child):\n \n if child in self._children:\n return self._children.index(child)\n else:\n return -1", "def get_square_index(self, cell):\n return next(s for s, square in enumerate(self.squares) if cell in square)", "def search_for_nums(data):\n index = None\n for i in 
range(len(data)-1,0, -1): #count backwards through the loop\n if data[i] != None: #found most recent input\n print(\"index found...data: %s\" % (data[i]))\n return i\n #END IF\n #END FOR\n return index", "def find(self, value):\n for position in range(self.get_size()):\n if self.table[position] == value:\n return position", "def find(self, string) -> int:\n if not self.exists(string):\n self.index[string] = 0\n return(self.index[string])", "def get_index(tag):\n global kpi_list\n try:\n return kpi_list.index(str(tag))\n except ValueError:\n return -1", "def index(self, value, start=0, stop=-1):\n return self.elem.index(value, start, stop)", "def count(self, element):\n count = 0\n for i in range(self._length): # Increment count when equal value is found\n if self._arr[i] == element:\n count += 1\n return count", "def __FindIndex(self,*Index):\n \n \n self.__CheckIndices(*Index)\n \n Num=self.__IndToNum(*Index)\n listindex= searchsorted(self.__NumList,Num)\n\n\n\n if listindex<self.NNZ():\n\n if self.__NumList[listindex]==Num:\n \n return listindex\n \n else:\n \n return -1-listindex\n \n else:\n return -1-listindex", "def occurence_(L,x):\r\n s=0\r\n for i in range(len(L)):\r\n if L[i]==x:\r\n s+=1\r\n return(s)", "def find_index(arr, val):\n index = 0\n min_differ = abs(arr[0] - val)\n for i in range(1, len(arr)):\n if abs(arr[i] - val) < min_differ:\n min_differ = abs(arr[i] - val)\n index = i\n return index", "def num_of_ele(self, ele):\n return self.ele_freqs[ele]", "def elementIndex(self, element):\n\n nel = self.nElements()\n if type(element) == types.IntType:\n m = element\n else:\n m = _cantera.phase_elementindex(self._phase_id, element)\n if m < 0 or m >= nel:\n raise CanteraError(\"\"\"Element \"\"\"+element+\"\"\" not in set \"\"\"\n +`self.elementNames()`)\n return m", "def get_position(vertex, partition):\n index = 0\n for part in partition:\n if vertex in part:\n break\n index = index + 1\n return index" ]
[ "0.7664608", "0.74272794", "0.7314248", "0.7284313", "0.7212289", "0.71917605", "0.7169984", "0.70909643", "0.7077145", "0.7066599", "0.7030703", "0.6965797", "0.69524926", "0.69229907", "0.689439", "0.68874395", "0.68740386", "0.68163633", "0.6796156", "0.6779805", "0.67458326", "0.67452544", "0.6717636", "0.6712589", "0.67038155", "0.67034703", "0.6668958", "0.6639836", "0.66230756", "0.661595", "0.6589863", "0.65880865", "0.65880865", "0.6582758", "0.65644246", "0.65625244", "0.6561503", "0.65605885", "0.65587056", "0.6556714", "0.6553298", "0.65515625", "0.65509367", "0.6503702", "0.6471276", "0.64632004", "0.6444692", "0.64361", "0.6430219", "0.6429433", "0.64268523", "0.6420809", "0.64160734", "0.64160734", "0.64020264", "0.639894", "0.6396958", "0.6395326", "0.63898104", "0.6369491", "0.63685274", "0.63669944", "0.6361629", "0.6350721", "0.6343421", "0.6338625", "0.63293207", "0.6319226", "0.63164705", "0.63137746", "0.6309952", "0.63000095", "0.62936586", "0.6286314", "0.6281378", "0.6266762", "0.6260147", "0.62562203", "0.6246013", "0.624093", "0.6235174", "0.6233496", "0.62262684", "0.6219143", "0.6210273", "0.6203003", "0.6199134", "0.61981946", "0.61954325", "0.6194887", "0.6162111", "0.6160693", "0.61493593", "0.6148434", "0.614539", "0.61378866", "0.6135319", "0.61308384", "0.612812", "0.61257493" ]
0.70635706
10
Decorator to add a new case to a pattern-matching function, where the new case is checked last.
def addpattern(base_func, **kwargs):
    allow_any_func = kwargs.pop("allow_any_func", False)
    if not allow_any_func and not _coconut.getattr(base_func, "_coconut_is_match", False):
        _coconut.warnings.warn("Possible misuse of addpattern with non-pattern-matching function " + _coconut.repr(base_func) + " (pass allow_any_func=True to dismiss)", stacklevel=2)
    if kwargs:
        raise _coconut.TypeError("addpattern() got unexpected keyword arguments " + _coconut.repr(kwargs))
    return _coconut.functools.partial(_coconut_base_pattern_func, base_func)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_case(self, text=None, type=None, pattern=None, pattern_flags=0,\n preexe=False):\n def wrapper(func):\n if preexe:\n self.preexe_cases.append((func, text, type, pattern, pattern_flags))\n else:\n self.cases.append((func, text, type, pattern, pattern_flags))\n return func\n\n return wrapper", "def _add_flag_case(self, flag):\n\n def wrapper(func):\n self.flag_cases.append((func, flag))\n return func\n\n return wrapper", "def replace_with(*, replacement='hello'):\n def _apply_decorator(f):\n return replacement\n return _apply_decorator", "def build_match_and_apply_functions(pattern, search, replace):\n\n def matches_rule(word):\n \"\"\" Check if word contains pattern.\n \"\"\"\n return re.search(pattern, word)\n\n def apply_rule(word):\n \"\"\" Replace text with replacement in word.\n \"\"\"\n return re.sub(search, replace, word)\n\n return (matches_rule, apply_rule)", "def replace_with(*, replacement):\n def _apply_decorator(f):\n return replacement\n return _apply_decorator", "def redacorator(func):\n def _replace(match):\n ori = match.group()\n text = match.group().strip().lower()\n return func(text, ori)\n return _replace", "def add_match(self, f, exclusions=None, **match_kwargs):\n assert not self._checked, 'can\\'t add after matchlist has been checked'\n\n if not match_kwargs: # Do nothing if no match_kwargs.\n return f\n\n self._verify_match_kwargs(match_kwargs, exclusions)\n self.matchers.append((match_kwargs, exclusions, f))\n return f", "def apply_rule(operator, pattern, replacement):\n new_op = operator.match_first(pattern)\n if new_op is None:\n return None\n return new_op.replace_first(\"generic\", replacement)", "def cases(cases_):\n\n def wrapper(func):\n def inner(*args):\n for case in cases_:\n new_args = args + (case if (isinstance(case, tuple)) else (case,))\n func(*new_args)\n\n return inner\n\n return wrapper", "def add_pattern(self, pattern, callback):\n self.patterns.append((pattern, callback))", "def replace_with(*, replacement='hello', f=DECORATED):\n return replacement", "def register_rule(pattern: matcher.Pattern) -> Callable[..., rules.Rule]:\n\n def register(handler: Callable[..., Any]) -> rules.Rule:\n return rules.make_rule(pattern, handler)\n\n return register", "def fnmatchcase(name, pat):\r\n\r\n if not pat in _cache:\r\n res = translate(pat)\r\n if len(_cache) >= _MAXCACHE:\r\n _cache.clear()\r\n _cache[pat] = re.compile(res)\r\n return _cache[pat].match(name) is not None", "def add(self, method: str, pattern: str, handler: Callable) -> None:", "def gen_case(self, stmt: statements.Case) -> None:\n block = self.builder.new_block()\n assert self.switch_options is not None\n value = self.context.eval_expr(stmt.value)\n if value in self.switch_options:\n self.error(\"Case defined multiple times\", stmt.location)\n self.switch_options[value] = block\n self.builder.emit_jump(block) # fall through\n self.builder.set_block(block)\n self.gen_stmt(stmt.statement)", "def __call__(self, *patterns):\n\n\n # defines the decorator that adds the patterns to the function lookup\n def decorator(func):\n func_args = inspect.getargs(func.__code__)\n func_name = func.__name__\n\n if len(patterns) != len(func_args.args):\n raise ChainsmokePatternMatchError(\n \"Number of patterns needs to equal number of args in {func_name}\".format(func_name=func_name))\n\n self.funcs[patterns] = func\n\n # define a function that gives a result from the matched function\n def inner(*inner_args):\n if not self.funcs.get((otherwise,)):\n raise ChainsmokePatternMatchError(\n 
\"Incomplete pattern match for {func_name}; try adding an 'otherwise' case\".format(\n func_name=func_name))\n\n matched_function = self.find_func(inner_args)\n return matched_function(*inner_args)\n\n return inner\n\n return decorator", "def fnmatch(self, pattern):\n return FNMatcher(pattern)(self)", "def test_stepregistry_step_decorator_should_register_func_with_proper_keyword(keyword):\n # given\n registry = StepRegistry()\n context = {}\n registry.create_step_decorators(context)\n\n # when\n def test_step():\n ...\n\n test_step = context[keyword.lower()](\"pattern\")(test_step)\n\n # then\n assert registry.step_implementations(keyword) == [\n StepImpl(keyword, \"pattern\", test_step)\n ]", "def replace_partial_args(\n func: Callable, target: Any, repl: Any,\n match_func: Optional[Callable] = None,\n replace_func: Optional[Callable] = None,\n) -> Callable:\n if match_func is None:\n def match_func(i, v, t): return v is t\n if replace_func is None:\n def replace_func(v, r): return r\n args = [(replace_func(v, repl) if match_func(i, v, target) else v)\n for i, v in enumerate(func.args)]\n kwargs = {k: (replace_func(v, repl) if match_func(k, v, target) else v)\n for k, v in func.keywords.items()}\n new_func = type(func)(func.func, *args, **kwargs)\n return new_func", "def replaces(func: Callable[..., Tuple[str]], name: str):\n Replacements._rep[name] = func\n return func", "def replace_with(*, replacement, f=DECORATED):\n return replacement", "def replace_if(condition, replacement):\n def _decorator(fn):\n @functools.wraps(fn)\n def _fn(*args, **kwargs):\n if condition(*args, **kwargs):\n return replacement(*args, **kwargs)\n return fn(*args, **kwargs)\n return _fn\n return _decorator", "def rules_with_cases(cls, search_pattern: str, CaseEnum: Type[TCaseEnum], detect_case: Callable[[Match], TCaseEnum], rules: Callable[[TCaseEnum], List[Dict[str, str]]]) -> PhonTransform:\n cases: List[TCaseEnum] = list(CaseEnum)\n rule_dict = {case: {k: v for rule in rules(case) for k, v in rule.items()} for case in cases}\n sub_func = lambda match: rule_dict[detect_case(match)][match['key']]\n return cls(search_pattern, sub_func)", "def decorated(origFunc, newFunc, decoration='None'):\n\n pass", "def add_substitution(self, pattern, repl):\r\n\r\n self.substitutions.append( (re.compile(pattern), repl) )", "def visit_function(self, func):\n func.name = self.function_rewrites.get(func.name, func.name)\n return super(AccessCompiler, self).visit_function(func)", "def strategy(func):\n strategies.append(func)\n return func", "def changed(func, name):\n @functools.wraps(func)\n def decorateit(*args, **kwargs):\n if func.__name__ not in changed_list:\n changed_list.append(func.__name__)\n logger.debug(\"function {} changed by {}! 
Check if it works as you think\".format(name))\n return func(*args, **kwargs)\n return decorateit", "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def test_tolerate_decorated_function_raise_if_switch_fail():\n def test_function():\n raise AttributeError()\n def test_switch(*args, **kwargs):\n return False, args, kwargs\n fn = tolerate(switch=test_switch)(test_function)\n fn()", "def _memorized_fnmatch(name: str, pattern: str) -> bool:\n return bool(_compile_fnmatch(pattern).match(name))", "def depth_from_match(function):\n def wrap(start, values):\n #print 'Depth %d | %d %s' %(self._depth, start, values)\n #print self._current_node\n self._depth = start\n self._current_node = function(values)\n #print self._current_node\n return ''\n\n return wrap", "def captive(f):\n\n def wrapper(self, request, *args, **kwargs):\n return captiveHandler(request) or f(self, request, *args, **kwargs)\n functools.update_wrapper(wrapper, f)\n return wrapper", "def case(self, key: Union[Hashable, Container], result: Union[Callable, Any], force_contains: bool = False, **kwargs):\n self._cases.__setitem__(key, (result, kwargs), force_contains = force_contains)", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def _maybe_add_pattern(attr, patterns):\n handler_type = getattr(attr, '_gen_handler', False)\n\n if not handler_type:\n return\n if handler_type not in ['call', 'cast', 'info']:\n raise AttributeError(\"unknown handler type {}\".format(handler_type))\n\n o = attr._gen_order\n p = attr._gen_pattern\n LOG.debug(\"adding {} {} with pattern {}\".format(handler_type,\n attr,\n p))\n patterns[handler_type].append((o, p))", "def switch(cond, ift, iff):", "def _mayus(func):\r\n\t\tdef decorator(self, key, *args):\r\n\t\t\treturn func(self, str(key).upper(), *args)\r\n\t\treturn decorator", "def route(self, pattern: str) -> Callable:\n\n def warpper(view: Callable) -> Callable:\n self.add_route(pattern, view)\n return view\n\n return warpper", "def match(var, *args, default=NoDefault, strict=True):\n if len(args) % 2 != 0:\n raise MatchError(\"Every guard must have an action.\")\n\n if default is NoDefault and strict is False:\n default = False\n\n pairs = list(pairwise(args))\n patterns = [patt for (patt, action) in pairs]\n\n for patt, action in pairs:\n matched_as_value, args = match_value(patt, var)\n\n if matched_as_value:\n lambda_args = args if len(args) > 0 else BoxedArgs(var)\n return run(action, lambda_args)\n\n if default is NoDefault:\n if _ not in patterns:\n raise MatchError(\"'_' not provided. 
This case is not handled:\\n%s\" % str(var))\n else:\n return default", "def lift(f, message):\n def lift_impl(x):\n if f(x):\n return return_(x)\n return Fail(message)\n\n return lift_impl", "def chat_handler(self, regex, order=100):\n def decorator(func):\n self.register_handler(regex, func, order)\n return func\n\n return decorator", "def group_match(\n self, component: str\n ) -> Callable[[_ZhaGroupEntityT], _ZhaGroupEntityT]:\n\n def decorator(zha_ent: _ZhaGroupEntityT) -> _ZhaGroupEntityT:\n \"\"\"Register a group match rule.\"\"\"\n self._group_registry[component] = zha_ent\n return zha_ent\n\n return decorator", "def replace_function(self, pfunc, index = -1):\n raise NotImplementedError()", "def override(fnc=None):\n\n if fnc is None:\n return lambda x: x\n else:\n return _Override(fnc)", "def regex_replace_value(val, val_new, pattern,\n val_exception=np.nan):\n try:\n if not bool(re.match(pattern, val)):\n return val_new\n else:\n return val\n except:\n return val_exception", "def multiline_regex_suggestor(regex, substitution=None, ignore_case=False):\n if isinstance(regex, str):\n if ignore_case is False:\n regex = re.compile(regex, re.DOTALL | re.MULTILINE)\n else:\n regex = re.compile(regex, re.DOTALL | re.MULTILINE | re.IGNORECASE)\n\n if isinstance(substitution, str):\n def substitution_func(match):\n return match.expand(substitution)\n else:\n substitution_func = substitution\n\n def suggestor(lines):\n pos = 0\n while True:\n match = regex.search(''.join(lines), pos)\n if not match:\n break\n start_row, start_col = _index_to_row_col(lines, match.start())\n end_row, end_col = _index_to_row_col(lines, match.end() - 1)\n\n if substitution is None:\n new_lines = None\n else:\n # TODO: ugh, this is hacky. Clearly I need to rewrite\n # this to use\n # character-level patches, rather than line-level patches.\n new_lines = substitution_func(match)\n if new_lines is not None:\n new_lines = ''.join((\n lines[start_row][:start_col],\n new_lines,\n lines[end_row][end_col + 1:]\n ))\n\n yield Patch(\n start_line_number=start_row,\n end_line_number=end_row + 1,\n new_lines=new_lines\n )\n delta = 1 if new_lines is None else min(1, len(new_lines))\n pos = match.start() + delta\n\n return suggestor", "def apply_rule(word):\n return re.sub(search, replace, word)", "def callback(scanner, name, obj):\n cases_dict = self.variant_of.__adt__['cases']\n case_implementations = cases_dict[case_name]\n if case_implementations[self.name] is not None:\n # venusian may scan the same declarations multiple times during the app initialization,\n # therefore we allow re-assignment of the same case implementations and prohibit\n # any new implementations\n if case_implementations[self.name] is not obj:\n raise TypeError(\n 'Variant {variant} of {type} is already bound to the case {case} => {impl}. 
'\n 'Conflict at {target}'.format(\n variant=self.name,\n type=self.variant_of.__adt__['type'],\n case=case_name,\n impl=str(case_implementations[self.name]),\n target=str(obj)\n )\n )\n case_implementations[self.name] = obj\n # Re-calculate matches\n self.variant_of.__adt__['matches'] = {\n variant: {case: impl[variant] for case, impl in cases_dict.items()}\n for variant in self.variant_of.__adt__['variants']\n }\n scanner.config.update_adt_registry(self.variant_of.__adt__)", "def single_case(self, i, case):\n\n # Pop a previous case if we have consecutive ones.\n if self.single_stack:\n self.single_stack.pop()\n self.single_stack.append(case)\n try:\n t = next(i)\n if self.use_format and t in _CURLY_BRACKETS:\n self.handle_format(t, i)\n elif t == '\\\\':\n try:\n t = next(i)\n self.reference(t, i)\n except StopIteration:\n self.result.append(t)\n raise\n else:\n this_case = self.get_single_stack()\n if this_case is not None:\n self.result.append(self.convert_case(t, this_case))\n except StopIteration:\n pass", "def fnmatch(name, pat):\r\n\r\n import os\r\n name = os.path.normcase(name)\r\n pat = os.path.normcase(pat)\r\n return fnmatchcase(name, pat)", "def addMatchEntry(tmpFD, match, mtype, val):\n tmpFD.write(f\" {match} {mtype} {val}\\n\")", "def create_pattern_function(self):\n\n type_regex = \"(?:\\w+(?:\\:\\:)?)+\"\n regex = \"^(?P<indent>\\s*)(?P<virtual>virtual )?(?P<function_return>(?:const )?\" + type_regex + \"(?P<subtype><?\" + type_regex + \">?)?) (?P<function_name>.*)\\((?P<args>.*)\\)(?P<const_qualifier> const)?(?: = 0)?;\\n$\"\n return regex", "def make_patch(patcher, function):\n def wrapper_function(self, *args, **kwargs):\n if not patcher.target_key_down:\n function(self, *args, **kwargs)\n\n return wrapper_function", "def span_case(self, i, case):\n\n # A new \\L, \\C or \\E should pop the last in the stack.\n if self.span_stack:\n self.span_stack.pop()\n if self.single_stack:\n self.single_stack.pop()\n self.span_stack.append(case)\n count = len(self.span_stack)\n self.end_found = False\n try:\n while not self.end_found:\n t = next(i)\n if self.use_format and t in _CURLY_BRACKETS:\n self.handle_format(t, i)\n elif t == '\\\\':\n try:\n t = next(i)\n self.reference(t, i)\n except StopIteration:\n self.result.append(t)\n raise\n else:\n self.result.append(self.convert_case(t, case))\n if self.end_found or count > len(self.span_stack):\n self.end_found = False\n break\n except StopIteration:\n pass\n if count == len(self.span_stack):\n self.span_stack.pop()", "def step(step_name, extra_types=None):\n\n def decorator(func):\n # Register the step, other way return the function unchanged\n step_function = StepFunction(func, step_name, extra_types)\n # Check for similar steps, in both directions\n step_function.search_and_report_similar()\n # Register it\n data.add_step(step_function)\n return func\n\n return decorator", "def register_rule(cls, rule_func):\n cls._rules_factories.append(rule_func)", "def _fnmatch_lower(name: str | None, pattern: str) -> bool:\n if name is None:\n return False\n return fnmatch.fnmatch(name.lower(), pattern)", "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "def _assign_pattern(where, key, what):\n if what:\n where[key] = what", "def _create_new_criteria_function(lm,mm):\n return lambda D, i, u, j: parametrized_insertion_criteria(D, i, u, j,\n lm=lm, mm=mm)", "def wrap_gate(fn):\n return lambda parms: fn(**parms) if len(parms) > 0 else fn", "def delegate(func):\n @functools.wraps(func)\n def 
wrapped(self, *args, **kwargs):\n path = args[0]\n handler = self.router.match(func.__name__, path)\n return handler(*args, **kwargs)\n return wrapped", "def wrapped(func):\n self.routes.append((path, {\n 'regex': re.compile('^' + re.sub(self._part_matcher,'(.*?)',path) + '$'),\n 'function':func,\n 'reqs':req,\n 'kwargs':kwargs,\n 'parts':parts_info,\n 'generate':generate\n }))\n\n return func", "def original_case(value: str, **kwargs: Any) -> str:\n return value", "def on_match(self, patterns, priority=0):\n\n def decorator(func):\n router = ListRouter(priority=priority)\n\n @functools.wraps(func)\n async def _wrapper(update, ctx):\n if not isinstance(update, Message):\n return SKIPPED\n\n for pattern in patterns:\n match = re.match(pattern, update.text)\n if match:\n break\n else:\n return SKIPPED\n\n ctx.match = match\n\n return await func(update, ctx)\n\n router.add_handler(_wrapper)\n\n self._routers.append(router)\n\n return func\n\n return decorator", "def walk(rv, F):\n args = getattr(rv, 'args', None)\n if args is not None:\n if args:\n newargs = tuple([walk(a, F) for a in args])\n if args != newargs:\n rv = rv.func(*newargs)\n if simultaneous:\n # if rv is something that was already\n # matched (that was changed) then skip\n # applying F again\n for i, e in enumerate(args):\n if rv == e and e != newargs[i]:\n return rv\n rv = F(rv)\n return rv", "def _flag_function_or_expanded_sequence(s):\n name, args = decode_flag_function(s)\n if _is_anonymous_flag_function(name, args):\n return s\n if _is_sequence_flag_function(name):\n return _expand_sequence(name, args)\n raise ValueError(s)", "def register( self, pattern, callback ):\n self.patterns.append((pattern, callback))", "def intercept(self, event_pattern, priority, callback):\n pass # pragma: no cover", "def decor(flag_string=''):\n def decor1(f):\n def wrap(*args, **kwargs):\n if flag_string and flag_string in args and flag_string in kwargs.values():\n print(\"{} has been called with following parameters:\".format(f))\n print(args)\n elif not flag_string:\n print(\"flag_string is empty!\")\n return f(*args)\n return wrap\n return decor1", "def test_add_or_update_case(self):\n pass", "def match(*args, priority=0):\n import inspect\n for arg in args:\n if not isinstance(arg, (set, str)) and arg is not None:\n raise TypeError(f\"match_fun() invalid argument: {arg}\")\n\n match_rules = []\n for arg in args:\n if arg == None or type(arg) == set:\n match_rules.append(arg)\n elif isinstance(arg, str):\n match_rules.append({s for s in arg.split(\"|\") if s})\n else:\n raise TypeError(f\"wrong type of argumment: {type(arg)}, {arg}\")\n\n arg_count = len([type(arg) for arg in args]) + 1\n\n def decorator(func):\n paramaters = inspect.signature(func).parameters\n if len(paramaters) is not arg_count:\n if not inspect.Parameter.VAR_POSITIONAL in {p.kind for p in paramaters.values()}:\n raise TypeError(\n f\"function {func} does not contain {arg_count} argumments\")\n return MatchRuleWrapper(func, match_rules, priority)\n\n return decorator", "def adapt_choice_f(choice_f):\r\n result = lambda ids, seqs='ignored': choice_f(ids)\r\n return result", "def add_case(self, name):\n mod = self._mod\n std = mod.give_aster_study()\n prefs = aster_s_gui.AsterPreferences()\n \n case = std.add_case(self.find_new_name(std, name))\n case.use(aster_s.CommFile(self.get_str(\"command-file\")))\n case.use(aster_s.SMeshEntry(self.give_field(\"mesh\").node.entry))\n if prefs.get(aster_s_gui.InteractiveFollowUp):\n case.use(aster_s.InteractivFollowUp())\n if 
prefs.get(aster_s_gui.SaveBaseResult):\n case.use(aster_s.HasBaseResult())\n mod.update()\n #salome.sg.updateObjBrowser(0)", "def chat_handler(self, regex, order=100):\n if not isinstance(regex, RE_TYPE):\n regex = re.compile(regex, re.I)\n\n def decorator(func):\n self._chat_handlers.append((order, regex, func))\n # Use only the first value to sort so that declaration order doesn't change.\n self._chat_handlers.sort(key=lambda x: x[0])\n return func\n\n return decorator", "def partialmethod(func, arg):\n return lambda *args, **kwargs: func(arg, *args, **kwargs)", "def bind(f):\n def bind_impl(x):\n if is_fail(x):\n return x\n if is_pass(x):\n return f(x.value)\n raise ValueError('Check has to be of type Pass | Fail.')\n\n return bind_impl", "def heuristics(values):\n def decorator(fn):\n return Heuristics(fn, fn.arg_names, values)\n\n return decorator", "def defer_lowering(self, key, lower_fn):\n with self._lock:\n if key in self._no_defer:\n # Key is marked as no defer, register lowering now\n lower_fn()\n else:\n # Defer\n self._deferred[key].append(lower_fn)", "def gen_range_case(self, stmt: statements.Case) -> None:\n block = self.builder.new_block()\n assert self.switch_options is not None\n # TODO: This could lead to a very big if-then-else chain?\n value1 = self.context.eval_expr(stmt.value1)\n value2 = self.context.eval_expr(stmt.value2)\n for value in range(value1, value2 + 1):\n if value in self.switch_options:\n self.error(\"Case defined multiple times\", stmt.location)\n self.switch_options[value] = block\n\n self.builder.emit_jump(block) # fall through\n self.builder.set_block(block)\n self.gen_stmt(stmt.statement)", "def nextf(f, offset=1):\n def feature(s, i):\n i += offset\n return i < len(s) and f(s, i)\n return feature", "def find_func(self, params):\n match = self.funcs.get(params, self.funcs[(otherwise,)])\n return match", "def selfie_depreceated(f):\n return partial(f, f)", "def replacement(cls, search_pattern: str, replacement: str) -> PhonTransform:\n sub_func = lambda match: replacement\n return cls(search_pattern, sub_func)", "def add_pattern(self, name, pattern=None):\n self._pattern_reg.add_pattern(name, pattern)", "def register_side_effect(label, func):\n if func in _registry[label]:\n return\n _registry.add(label, func)", "def when(self, case_expr, result_expr) -> Self:\n cases = self.cases + (case_expr,)\n results = self.results + (result_expr,)\n return self.copy(cases=cases, results=results)", "def route(self, url_pattern, methods=None):\n def decorated(f):\n self.url_map.append(\n ([m.upper() for m in (methods or ['GET'])],\n URLPattern(url_pattern), f))\n return f\n return decorated", "def test_add_url_rule_duplicate_with_replace():\n\n application_services.add_url_rule('/tests/application/duplicate/rule2',\n view_func=mock_view_function,\n methods=HTTPMethodEnum.POST)\n\n application_services.add_url_rule('/tests/application/duplicate/rule2',\n view_func=mock_view_function,\n methods=HTTPMethodEnum.GET,\n replace=True)", "def register_side_effect(label: str, func: Callable) -> None:\n if func in _registry[label]:\n return\n _registry.add(label, func)", "def replaces_operator(func: Callable[[Any, Any, str, str], Tuple[str]],\n classname: str,\n optype: str,\n otherclass: str = None):\n if otherclass is None:\n otherclass = classname\n Replacements._oprep[(classname, otherclass, optype)] = func\n return func", "def add_to_apply_calls(self, func, *args, **kwargs):\n pass", "def fallback(self, old, new):\n hit = False\n args = list(self.args)\n for i, arg in 
enumerate(args):\n if not hasattr(arg, '_eval_subs'):\n continue\n arg = arg._subs(old, new, **hints)\n if not _aresame(arg, args[i]):\n hit = True\n args[i] = arg\n if hit:\n rv = self.func(*args)\n hack2 = hints.get('hack2', False)\n if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack\n coeff = S.One\n nonnumber = []\n for i in args:\n if i.is_Number:\n coeff *= i\n else:\n nonnumber.append(i)\n nonnumber = self.func(*nonnumber)\n if coeff is S.One:\n return nonnumber\n else:\n return self.func(coeff, nonnumber, evaluate=False)\n return rv\n return self", "def add_filter(self, f):\n raise NotImplementedError", "def replaces_method(func: Callable[..., Tuple[str]], classname: str, method_name: str):\n Replacements._method_rep[(classname, method_name)] = func\n return func", "def _default_wrapper(recons_func, **kwargs):\n return recons_func(**kwargs)", "async def addcase(self, ctx, problem_name, *, arg):\n if not await problem_exists(ctx, problem_name):\n return\n list = arg.split('|')\n expected_input = list[0]\n expected_output = list[1]\n problems[problem_name].cases[expected_input] = expected_output\n message = 'Case successfully added.\\n'\n message += 'Expected input:```\\n' + expected_input + '```'\n message += 'Expected output:```\\n' + expected_output + '```'\n await ctx.send(message)\n await write_problems()", "def _callback(self, matcher):\n matched_field = matcher.group(self.field)\n replacement = self.lookup.get(matched_field)\n if not replacement:\n return matcher.group(0)\n\n fields = list(f or \"\" for f in matcher.groups())\n fields[self.field - 1] = replacement\n\n return \"\".join(fields)", "def relate_pattern(self, other, pattern): # -> bool:\n ..." ]
[ "0.6772898", "0.6573733", "0.5659867", "0.553022", "0.5431647", "0.5417446", "0.5378049", "0.53306925", "0.5268356", "0.5241548", "0.5207836", "0.51987594", "0.511116", "0.5002985", "0.49989533", "0.49934804", "0.49919218", "0.4984045", "0.49082255", "0.49020272", "0.4897801", "0.4881766", "0.48704672", "0.4864377", "0.48457196", "0.4836121", "0.48320782", "0.4817322", "0.4805832", "0.47709176", "0.47135052", "0.47076783", "0.4677338", "0.46720028", "0.46717486", "0.46603283", "0.46526787", "0.462528", "0.46248364", "0.4623371", "0.4612505", "0.4593112", "0.45880172", "0.4578915", "0.4571237", "0.45697176", "0.45602873", "0.45507884", "0.45466027", "0.45421156", "0.45372143", "0.45326325", "0.45084238", "0.45065406", "0.45045957", "0.45039803", "0.45028082", "0.4497121", "0.44839013", "0.44774398", "0.44733632", "0.44731322", "0.44717288", "0.4470626", "0.4466397", "0.44631487", "0.44620734", "0.44486722", "0.4440273", "0.44300815", "0.44285375", "0.44227275", "0.4418827", "0.4415155", "0.4412796", "0.4406861", "0.44061944", "0.4386236", "0.43854848", "0.43811634", "0.43796265", "0.4367621", "0.4365738", "0.43656847", "0.43632817", "0.43629465", "0.4361836", "0.4351702", "0.43468416", "0.43454468", "0.43407702", "0.4336276", "0.43343547", "0.4333878", "0.43249053", "0.4322045", "0.43199643", "0.4317297", "0.43138647", "0.43097118" ]
0.6158975
2
consume(iterable, keep_last) fully exhausts iterable and returns the last keep_last elements.
def consume(iterable, keep_last=0):
    return _coconut.collections.deque(iterable, maxlen=keep_last)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last(iterable):\n d = deque(iterable, maxlen=1)\n try:\n return d.pop()\n except IndexError:\n raise ValueError(\"Cannot return last item from empty iterable {!r}\".format(iterable))", "def last(iterable):\n it = iter(iterable)\n item = next(it)\n for item in it:\n pass\n return item", "def return_last(iter):\n for thing in iter:\n pass\n return thing", "def last(iterator):\n item = None\n for item in iterator:\n pass\n return item", "def consume(iterator):\n deque(iterator, maxlen=0)", "def last(seq):\n try:\n return seq[-1]\n except TypeError:\n old = None\n it = iter(seq)\n while True:\n try:\n old = next(it)\n except StopIteration:\n return old", "def last(iterable, *default):\n\tassert len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x", "def tail(self, *, peek=False, ignore_no_item_found=False, only_new_items=False):\n with self._buffer.reader_lock() as reader:\n if only_new_items:\n try:\n item = reader.find_last_gt(self.last_key)\n except ValueError as e: # ValueError: No item found with key above: self.last_key\n if ignore_no_item_found:\n return None\n raise e\n except TypeError as e:\n # TypeError: '>' not supported between instances of type(key) and 'NoneType'\n if self.last_item is None: # first time reading a value from buffer\n item = reader[-1]\n else:\n raise e\n else:\n try:\n item = reader[-1]\n except IndexError as e: # IndexError: deque index out of range\n if ignore_no_item_found:\n return None\n raise e\n if not peek:\n self.last_item = item\n return item", "def getLatest(self, limit=None):\n count = len(self)\n if limit is None or limit > count:\n limit = count\n if not limit:\n return []\n return list(self[-limit:])", "def take_last(\n count: int,\n) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:\n\n def _take_last(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:\n async def subscribe_async(aobv: AsyncObserver[_TSource]) -> AsyncDisposable:\n safe_obv, auto_detach = auto_detach_observer(aobv)\n queue: List[_TSource] = []\n\n async def asend(value: _TSource) -> None:\n queue.append(value)\n if len(queue) > count:\n queue.pop(0)\n\n async def aclose() -> None:\n for item in queue:\n await safe_obv.asend(item)\n await safe_obv.aclose()\n\n obv = AsyncAnonymousObserver(asend, safe_obv.athrow, aclose)\n return await pipe(obv, source.subscribe_async, auto_detach)\n\n return AsyncAnonymousObservable(subscribe_async)\n\n return _take_last", "def tail(iterable, n):\n if n <= 0:\n return []\n return list(deque(iterable, maxlen=n))", "def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element", "def peek_rear(self):\n if self.items:\n return self.items[-1]\n return None", "def until_last(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[:matches[-1]+1])", "def dropwhile(iterable, predicate):\n return iter(it.dropwhile(predicate, iterable))", "def take_while(coll, func):\n i = 0\n while i < len(coll) and func(coll[i]):\n i += 1\n return coll[:i]", "def butlast(mylist):\n # This returns a copy of mylist\n return mylist[:-1]", "def take(num, iterable):\n return list(islice(iterable, num))", "def get_only(seq: Iterable[T]) -> T:\n it = iter(seq)\n try:\n first_element = it.__next__()\n # we use the sentinel approach rather 
than the usual (evil) Python \"attempt can catch the\n # exception\" approach to avoid raising zillions of spurious exceptions on the expected\n # code path, which makes debugging a pain\n sentinel = object()\n second_element = next(it, sentinel)\n if second_element is sentinel:\n return first_element\n else:\n got_msg: str\n if isinstance(seq, Sized):\n got_msg = str_list_limited(seq, limit=10)\n else:\n got_msg = f\"{first_element!r}, {second_element!r}, and possibly more.\"\n raise ValueError(f\"Expected one item in sequence but got {got_msg}\")\n except StopIteration:\n raise ValueError(\"Expected one item in sequence but got none\")", "def PeekIterable(iterable):\n try:\n head_element = iterable.next()\n new_iterable = itertools.chain([head_element], iterable)\n return head_element, new_iterable\n except StopIteration:\n return None, iterable", "def consume(iterator, n=None, next=next, islice=islice, deque=deque):\n if n is not None:\n next(islice(iterator, n, n), None)\n else:\n exhaust(iterator)", "def peek(iterable, size=1):\r\n objs = []\r\n for _ in range(size):\r\n try:\r\n obj = next(iterable)\r\n except StopIteration:\r\n break\r\n objs.append(obj)\r\n return objs, itertools.chain(objs, iterable)", "def test_that_peekleft_returns_tail_from_list_of_iterables(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n assert new_deque.peekleft() == new_deque.popleft()", "def consume(\n self,\n count: int = 1,\n ) -> List[T]:\n return list(islice(self, count))", "def peek_last(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.tail.data", "def get_last(self, limit = 1):\n if len(self.data) == 0:\n return None\n self.sort_and_reduce()\n if len(self.data) < limit:\n limit = len(self.data)\n\n return self.data[-limit:][0]", "def take_max(self):\n return self.delete_first()", "def first_every_last(iterable, first, every, last):\n did_first = False\n for item in iterable:\n if not did_first:\n first(item)\n every(item)\n if did_first:\n last(item)", "def test_deque_works_on_diff_iterables_head_is_last_val_pushed_after_pop(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n new_deque.pop()\n assert new_deque._container.head.val == iterable[-2]", "def drop(iterable, n, islice=islice):\n return islice(iterable, n, None)", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def last_el(x):\n if N.isscalar(x): return x\n else: return x[-1]", "def get_last(self, count):", "def take_rest():\n def run(chunks, chunk, last):\n if last:\n return ParserResult.from_done(_chunks_merge((chunk, chunks)), chunk[:0], last)\n else:\n 
return ParserResult.from_partial(Parser(run, (chunk, chunks)))\n return Parser(run, tuple())", "def _Peek(self):\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._peek_seen = True\n return None\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except (AttributeError, IndexError, KeyError, TypeError):\n pass\n # Object is not iterable -- treat it as the only item.\n return self._iterable", "def last_item(self):\n return self.container[self.length-1]", "def take(n, iterable):\n return list(islice(iterable, n))", "def take(n, iterable):\n return list(islice(iterable, n))", "def remove_last_item_from_stack(self):\n if self.length > 0:\n return self.container.pop()\n else:\n return None", "def take(iterable, n):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def with_previous(iterable, *, fillvalue=None):\n previous = fillvalue\n for item in iterable:\n yield previous, item\n previous = item", "def reduce_ends(\n self,\n preserve=[\"s22\", \"ld\"],\n tries=10,\n threads=1,\n returntype=\"equiv\",\n best=1,\n key=None,\n initequiv=None,\n ):\n raise NotImplementedError\n # from fastreduceD import fastreduce\n # return fastreduce.reduce_ends(\n # self, preserve, tries, threads, returntype, best, key, initequiv\n # )", "def take(n, iterable, islice=islice):\n return islice(iterable, n)", "def trim_iterable(iterable, limit, *, split=None, prefix='', postfix=''):\n if split is None:\n sl = 0\n join = False\n else:\n sl = len(split)\n join = True\n result = []\n rl = 0\n for element in iterable:\n element = prefix + element + postfix\n el = len(element)\n if len(result) > 0:\n el += sl\n rl += el\n if rl <= limit:\n result.append(element)\n else:\n break\n if join:\n result = split.join(result)\n return result", "def latest_backup(backups, level):\n tail = itertools.dropwhile(lambda x: len(x) != level, backups)\n try:\n return next(tail)\n except StopIteration:\n return None", "def test_popleft_works_on_diff_iterables_head_is_last_val_pushed_after_pop(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n new_deque.popleft()\n assert new_deque._container.head.val == iterable[-1]", "def get_last(self):\n return self.get_block(len(self.chain)-1)", "def consume(iterator, n=None):\n # Use functions that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n collections.deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)", "def last(self):\n if self.is_empty():\n raise Empty(\"Deque is empty\")\n return self._trailer._prev._element #real item just before trailer", "def peek(self):\n # TODO: Return top item, if any\n print(\"self.list P\", self.list)\n print(\"length\", self.length())\n if self.is_empty():\n return None\n else:\n return self.list[self.length()-1]\n # do n-1\n # return self.list[-]", "def consume_all(self, max_loops=None):\n curr_loop = 0\n if max_loops is None:\n max_loops = float('inf')\n while curr_loop < max_loops:\n curr_loop += 1\n try:\n self.consume_next()\n except StopIteration:\n break", "def pop(self):\n if self._pushed:\n rv = self._pushed.pop()\n else:\n rv = self._generator.next()\n self.last = rv\n return rv", "def pop_last(self):\n self.pop_item(-1)", "def consume(iterator, n=None):\n # Use functions 
that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)", "def take_while(self, test):\n # type: (Callable) -> List[T]\n passing_elements = [] # type: List[T]\n while self.has_next() and test(self.peak()):\n passing_elements.append(self.next())\n return passing_elements", "def drop_while(coll, func): \n i = 0\n while i < len(coll) and func(coll[i]):\n i += 1\n return coll[i:]", "def peek(self):\n return self.items[len(self.items)-1]", "def take(num, iterable):\n for i, e in enumerate(iterable):\n if i >= num:\n break\n yield e", "def last(self):\n if self.ordered:\n queryset = self.reverse()\n else:\n self._check_ordering_first_last_queryset_aggregation(method=\"last\")\n queryset = self.order_by(\"-pk\")\n for obj in queryset[:1]:\n return obj", "def peek(self):\n if self.is_empty():\n return None\n list_length = len(self.list) - 1\n return self.list[list_length]", "def last(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).last()\n\n return self[-1]", "def slice(iterable, *args):\n return iter(it.islice(iterable, *args))", "def ensure_unique(iterable, max_skips=float('inf')):\n old = set()\n iterator = iter(iterable)\n try:\n while True:\n i = 0\n while i <= max_skips:\n x = next(iterator)\n if x not in old:\n yield x\n old.add(x)\n break\n i += 1\n else:\n break\n except StopIteration:\n pass", "def iterator_peek(iterator: Iterator[T], count: int) -> tuple[list[T], Iterator[T]]:\n\n ret = []\n for _ in range(count):\n try:\n ret.append(next(iterator))\n except StopIteration:\n break\n\n return ret, chain(ret, iterator)", "def drop(it, num_to_skip: int): # noqa: F811\n check_arg(\n num_to_skip >= 0,\n \"Number of items to skip must be positive but got %s\",\n (num_to_skip,),\n )\n if hasattr(it, \"__next__\"):\n return itertools.islice(it, num_to_skip, None)\n else:\n return _DropIterable(it, num_to_skip)", "def test_deque_works_on_diff_iterables(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n assert new_deque.pop() == iterable[-1]", "def after_last(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[matches[-1]+1:])", "def pop(self, last=True):\r\n if not self:\r\n raise KeyError('set is empty')\r\n key = reversed(self).next() if last else iter(self).next()\r\n self.discard(key)\r\n return key", "def tail(real_iter, n_th):\n if n_th <= 0:\n return []\n\n real_list = list(real_iter)\n start = len(real_list)-n_th if n_th < len(real_list) else 0\n return real_list[start:]", "def test_deque_works_on_diff_iterables_tail_is_first_val_pushed(iterable):\n from deque import Deque\n new_deque = Deque(iterable)\n new_deque.pop()\n assert new_deque._container.tail.val == iterable[0]", "def pop(self):\n\n if not self.empty:\n i = self._begin\n\n self._begin = (self._begin + 1) % self._capacity\n self._size -= 1\n\n return (self[i])\n else:\n raise ValueError", "def last(self):\n return self.deque[-1]", "def lookahead(n, iterable):\n for value in islice(copy.copy(iterable), n, None):\n return value\n raise IndexError(n)", "def last(self, rows: List[Row]) -> List[Row]:\n if not rows:\n logger.warning(\"Trying to get last row from an empty list\")\n return []\n return [rows[-1]]", "def _chunk_end(c):\n end = None\n if isinstance(c, list):\n for e in 
c:\n if end is None or e.offset + e.length > end:\n end = e.offset + e.length\n else:\n end = c.offset + c.length\n return end", "def findlastindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1)", "def pop_rear(self):\n if len(self) == 0:\n if self.none_for_empty:\n return None\n raise ValueError(\"Buffer is empty\")\n pt = self.buf[self.rear]\n if self.rear == self.front:\n self.rear = None\n else:\n self.rear = (self.rear + 1) % self.length\n return pt", "def unzip_longest_finite(\n iterable: Iterable[Tuple[Any, ...]],\n *,\n fillvalue: Optional[Any] = None,\n) -> Tuple[Iterator[Any], ...]:\n for zipped in zip(*iterable):\n yield tuple(item for item in zipped if item != fillvalue)", "def peek(self):\n\n if self.is_empty():\n return None\n\n return self._list[-1]", "def at_last(self):\n return self._collection.at_last()", "def get_consistent_generator(iterable):\n try:\n first = next(iterable)\n except StopIteration:\n return None\n\n if first is None:\n return None\n\n return itertools.chain([first], iterable)", "def next(self):\n # type: () -> T\n if len(self.buffer) == 0:\n raise StopIteration\n self.prev = self.buffer.pop()\n self._buffer_to(self.lookahead)\n return self.prev", "def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._element", "def drop(iterable, n):\n counter = 0\n for element in iterable:\n if counter < n:\n counter += 1\n else:\n yield element", "def peek(self):\n if self.items:\n return self.items[-1]\n return None", "def peek(self):\n if self.items:\n return self.items[-1]\n return None", "def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._prev._element", "def get_outdated_containers(prefix, num_to_keep=2):\n most_recent = []\n\n for container_name in pyrax.cloudfiles.list_containers():\n if container_name.startswith(prefix):\n container = pyrax.cloudfiles.get_container(container_name)\n last_modified = get_container_last_modified(container)\n\n if last_modified:\n most_recent.append((last_modified, container))\n\n most_recent.sort()\n most_recent.reverse()\n\n if len(most_recent) > num_to_keep:\n yield most_recent.pop()", "def _advance(ls: List[any]) -> Optional[any]:\r\n try:\r\n return ls.pop(0)\r\n except IndexError:\r\n return None", "def get_next_ignore(remove=False):\n next_ignore = self.ignore_next\n\n # Just want to return it, don't want to remove yet\n if not remove:\n if type(self.ignore_next) in (list, tuple):\n next_ignore = self.ignore_next[0]\n return next_ignore\n\n # Want to remove it from ignore_next\n if type(next_ignore) in (list, tuple) and next_ignore:\n next_ignore = self.ignore_next.pop(0)\n elif not next_ignore:\n self.next_ignore = None\n next_ignore = None\n else:\n self.next_ignore = None\n\n return next_ignore", "def peek(self, numElems): \n self._ensureNumElems(numElems)\n # Take either numElems, or the entire head if not enough elements.\n last = min(self.head.shape[0], numElems)\n return self.head[:last]", "def peek(self):\n if not self.items:\n return None\n return self.items[-1]", "def take(n, seq):\n return itertools.islice(seq, n)", "def dequeue_rear(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The deque is empty')", "def postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if 
last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n yield arg", "def test_removing_the_last_val_in_deque(filled_deque):\n assert filled_deque.pop() == 1" ]
[ "0.65945584", "0.6441955", "0.6378709", "0.6298682", "0.62027675", "0.5920305", "0.58600926", "0.56532353", "0.5636033", "0.5536152", "0.54646903", "0.5434246", "0.5425633", "0.53895926", "0.5381128", "0.5375591", "0.5357746", "0.5334514", "0.5308047", "0.52714187", "0.5265742", "0.52291846", "0.52204406", "0.51856124", "0.51844627", "0.5172889", "0.5167413", "0.51408297", "0.51221836", "0.5112314", "0.5104834", "0.5104834", "0.5104834", "0.50975215", "0.50792706", "0.50659746", "0.50573516", "0.5047664", "0.5042493", "0.5042493", "0.5039447", "0.5032606", "0.5027693", "0.5027693", "0.5010708", "0.49992162", "0.49860916", "0.4976079", "0.4969268", "0.49669126", "0.49633896", "0.49390006", "0.49213207", "0.491952", "0.49172583", "0.49146488", "0.49121463", "0.49035406", "0.48999017", "0.48971158", "0.4891713", "0.4885091", "0.48849094", "0.48515332", "0.48399246", "0.48376107", "0.4831028", "0.48229566", "0.4819887", "0.48055705", "0.4804836", "0.4802477", "0.48016813", "0.4798598", "0.47897744", "0.47839385", "0.47820863", "0.4772726", "0.47722766", "0.4768009", "0.47586888", "0.47544122", "0.475149", "0.47463408", "0.4745556", "0.47454152", "0.47400695", "0.47330546", "0.47313467", "0.47313467", "0.4716096", "0.47150022", "0.47147608", "0.47113556", "0.4704519", "0.47044557", "0.46939787", "0.46926087", "0.4690642", "0.46732032" ]
0.7717289
0
Construct an object of the given data_type containing the given arguments.
def makedata(data_type, *args):
    if _coconut.hasattr(data_type, "_make") and _coconut.issubclass(data_type, _coconut.tuple):
        return data_type._make(args)
    if _coconut.issubclass(data_type, (_coconut.map, _coconut.range, _coconut.abc.Iterator)):
        return args
    if _coconut.issubclass(data_type, _coconut.str):
        return "".join(args)
    return data_type(args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data_type=None):\n self.type = data_type", "def from_data(cls, data):\n return object.__new__(cls)", "def create(cls, data=None):\n # allow create() calls with no input\n if not data:\n data = {}\n\n return cls(**data)", "def __init__(self,\n type_id: int,\n data,\n data_type: DataType = DataType.AUTODETECT,\n length=-1):\n if type_id < 0 or 255 < type_id:\n raise ValueError('The type_id parameter must between 0 and 255 but is {val}'.format(val=type_id))\n self.type_id = type_id\n self.data_type = data_type\n self.data = data\n self.length = length", "def _create_Data(classname, typename):\n attributes = {\"typename\": typename}\n if \"complex\" in typename:\n def format_sampler(self, val):\n \"\"\"Format surrounded by [] for the sampler.\n\n 2x space (for real and complex parts).\n \"\"\"\n if isinstance(val, int):\n return \"[%s]\" % (2 * val)\n return val\n attributes[\"format_sampler\"] = format_sampler\n globals()[classname] = type(classname, (Data,), attributes)", "def build(cls, data, _type): # type: ignore[no-untyped-def]\n if isinstance(data, list):\n backing = Int32Field(cls._convert_to_int(data))\n elif isinstance(data, Int32Field):\n backing = data\n elif isinstance(data, float):\n backing = Int32Field(int(data * sensor_fixed_point_conversion))\n else:\n backing = Int32Field(data)\n as_int = int(backing.value)\n if isinstance(_type, SensorTypeField):\n _converted_type = SensorType(_type.value)\n else:\n _converted_type = _type\n return cls(backing, as_int, _converted_type)", "def __init__(self, *args):\n if len(args) == 1 and isinstance(args[0], str):\n self._data = tuple(int(i) for i in str(args[0]).split(\".\"))\n elif len(args) == 1 and isinstance(args[0], Iterable):\n self._data = tuple(int(i) for i in args[0])\n else:\n self._data = tuple(int(i) for i in args)", "def __init__(self, typt_type: Type, data: str, *args, **kwargs):\n self.typt_type = typt_type\n self.data = data\n\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n if not args:\n raise TypeError('Field definition incorrect, please provide type')\n elif not isinstance(args[0], type):\n raise TypeError('Field input not a type')\n self.data_type = args[0]\n if ((self.data_type not in self.allowed_types and\n not issubclass(self.data_type, self.allowed_types))):\n raise TypeError('Field input type %s is not allowed' % self.data_type)\n self.check_kwargs(kwargs, self.data_type)\n # attributes\n if 'auto_update' in kwargs and kwargs['auto_update']:\n self.auto_update = self.data_type.utcnow # datetime.datetime\n if 'document_class' in kwargs and kwargs['document_class']:\n self.document_class = kwargs['document_class']\n self.validator = self.generate_validator(self.data_type, **kwargs)\n self.required = kwargs['required'] if 'required' in kwargs else True\n if 'default' in kwargs:\n self.default_value = kwargs['default']\n if not callable(self.default_value):\n validation_failed = False\n try:\n self.validator(self.default_value)\n except ValidationError as e:\n new_err = ('default value \"%s\"' % kwargs['default']) + ''.join(e.args)\n validation_failed = True\n if validation_failed:\n raise TypeError(new_err)\n # check if dict/list type and wrap copy in callable\n if isinstance(self.default_value, (dict, list)):\n def default_value_wrapper():\n return copy.deepcopy(kwargs['default'])\n self.default_value = default_value_wrapper", "def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n 
c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def __new__(cls,name,description,args_in,required=True,data_type=None,schema=None):\n mydict={\n \"name\":name,\n \"description\":description,\n \"in\":args_in,\n \"required\":required,\n \"schema\":schema,\n \"type\":data_type,\n }\n if args_in!=\"body\":\n mydict[\"type\"]=data_type\n return mydict", "def from_data(cls,data):\n\n new_object = cls() # Only this line needs to be updated\n new_object.data = data\n\n return new_object", "def _instantiate(clz, **data):\n\n new_obj = clz()\n setattr(new_obj, \"data\", data)\n for key, val in deepcopy(data).items():\n setattr(new_obj, key, val)\n return new_obj", "def from_data(cls, data):\n self = object.__new__(cls)\n self.required = parse_required(data)\n self.title = parse_title(data)\n self.type = parse_type(data)\n self.values = parse_values(data)\n return self", "def load(cls, data):\n return cls(**data)", "def __init__(__self__, *,\n data_type: pulumi.Input['AssetModelDataType'],\n logical_id: pulumi.Input[str],\n name: pulumi.Input[str],\n type: pulumi.Input['AssetModelPropertyTypeArgs'],\n data_type_spec: Optional[pulumi.Input['AssetModelDataTypeSpec']] = None,\n unit: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"data_type\", data_type)\n pulumi.set(__self__, \"logical_id\", logical_id)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)\n if data_type_spec is not None:\n pulumi.set(__self__, \"data_type_spec\", data_type_spec)\n if unit is not None:\n pulumi.set(__self__, \"unit\", unit)", "def createData(self, address: ghidra.program.model.address.Address, datatype: ghidra.program.model.data.DataType) -> ghidra.program.model.listing.Data:\n ...", "def __init__(self, data, time_unit, metadata=None):\r\n # Check that sensible time units were given\r\n if time_unit not in time_unit_conversion:\r\n raise ValueError('Invalid time unit %s, must be one of %s' %\r\n (time_unit, time_unit_conversion.keys()))\r\n\r\n #: the data is an arbitrary numpy array\r\n self.data = np.asanyarray(data)\r\n self.time_unit = time_unit\r\n\r\n # Every instance carries an empty metadata dict, which we promise never\r\n # to touch. 
This reserves this name as a user area for extra\r\n # information without the danger of name clashes in the future.\r\n if metadata is None:\r\n self.metadata = {}\r\n else:\r\n self.metadata = metadata", "def __init__(self, type, data):\n # Check type is type int\n if not isinstance(type, int):\n raise TypeError(\"ext type is not type integer\")\n # Check data is type bytes\n elif sys.version_info[0] == 3 and not isinstance(data, bytes):\n raise TypeError(\"ext data is not type \\'bytes\\'\")\n elif sys.version_info[0] == 2 and not isinstance(data, str):\n raise TypeError(\"ext data is not type \\'str\\'\")\n self.type = type\n self.data = data", "def create(self, data):\n raise NotImplementedError", "def __init__(self, type_name, args):\n super().__init__()\n self.type_name = type_name\n self.args = args\n self._projection = None", "def create(self, cls, data=None):\n return cls(self, initial_data=data)", "def __init__(self, *args, **kwargs):\n nargs = len(args) + len(kwargs)\n if nargs == 0:\n raise TypeError(\"one or more arguments required (0 given)\")\n \n first_arg = args[0]\n if isinstance(first_arg, str):\n if nargs > 2 or (nargs > 1 and \"quiet\" not in kwargs):\n raise TypeError(\n \"incorrect arguments for creating Dta from file\"\n )\n self._new_from_file(*args, **kwargs)\n elif isinstance(first_arg, Dta):\n if nargs > 3:\n raise TypeError(\n \"too many arguments to create Dta from existing Dta\"\n )\n self._new_from_dta(*args, **kwargs)\n elif isinstance(first_arg, collections.Iterable):\n self._new_from_iter(*args, **kwargs)\n else:\n raise TypeError(\"Dta cannot be created from these arguments:\")", "def __new__(subtype,parent,name,typecode,dimensions,**kwds):\n if 'values' in kwds.keys():\n result=kwds.pop('values')\n else:\n shape=[]\n for d in dimensions:\n dim = parent.dimensions[d]\n\n # Adding support for netCDF3 dimension objects\n if not isinstance(dim, int):\n dim = len(dim)\n shape.append(dim)\n\n result=np.zeros(shape,typecode)\n \n result=result[...].view(subtype)\n\n result.typecode = lambda: typecode\n result.dimensions = tuple(dimensions)\n result._ncattrs = ()\n for k,v in kwds.items():\n setattr(result,k,v)\n return result", "def _create_dataclass(obj, plain_dict=False):\n if plain_dict:\n items = obj\n name = \"Obj\"\n else:\n name = obj[\"class_name\"]\n items = obj[\"data\"]\n\n cls = dataclasses.make_dataclass(name, items.keys())\n return cls(**items)", "def __init__(self, *args, name=''):\n from collections import Iterable\n if len(args) == 1:\n if isinstance(args[0], Point):\n self.data = args[0].data.copy()\n elif isinstance(args[0], Iterable):\n self.data = list(args[0])\n else:\n self.data = list(args)\n self.name = name if not name.isspace() else ''", "def _make_cpp_data(id, timestamp, instrument, exchange, data):\n return DataCpp(id, timestamp, instrument, exchange, data)", "def __init__(self, data_type, other_props=None):\n if data_type not in VALID_TYPES:\n raise SchemaParseException('%r is not a valid Avro type.' 
% data_type)\n\n # All properties of this schema, as a map: property name -> property value\n self._props = {}\n\n self._props['type'] = data_type\n self._type = data_type\n\n if other_props:\n self._props.update(other_props)", "def __init__(self, name, data_type, functional=False, locking=True, indexed=False, unique=False):\r\n self.name = name\r\n self.data_type = data_type\r\n self.functional = functional\r\n self.locking = locking\r\n self.indexed = indexed\r\n self.unique = unique", "def from_dict(cls, connection, data):\n\t\tif data.get('type') == \"gauge\":\n\t\t\tcls = Gauge\n\t\telif data.get('type') == \"counter\":\n\t\t\tcls = Counter\n\n\t\tobj = cls(connection, data['name'])\n\t\tobj.description = data['description']\n\t\tobj.period = data['period']\n\t\tobj.attributes = data['attributes']\n\t\tobj.description = data['description']\n\n\t\treturn obj", "def from_dict(cls, data):\n return cls(**data)", "def __init__(self, mtype=None):\n self.data = pd.DataFrame()\n self.fields = list()\n\n # Set data type\n if mtype not in DataContainer.mergeTypes and mtype is not None:\n raise exceptions.TypeError\n else:\n self.mtype = mtype", "def create(self, objecttype, under, **kwargs):\n self.LogCommand()\n tclcode = \"stc::create \" + objecttype + \" -under \" + under\n\n for key in kwargs:\n tclcode = tclcode + \" \" + \"-\" + key + \" \" + str(kwargs[key])\n\n objecthandle = self.Exec(tclcode)\n logging.debug(\" - Python result - \" + str(objecthandle))\n return objecthandle", "def __new__(subtype,parent,name,typecode = 'f',dimensions = (),**kwds):\n __array_priority__ = 1000000000.\n if 'values' in kwds.keys():\n result=kwds.pop('values')\n else:\n shape=[]\n for d in dimensions:\n dim = parent.dimensions[d]\n\n # Adding support for netCDF3 dimension objects\n if not isinstance(dim, int):\n dim = len(dim)\n shape.append(dim)\n\n result=np.ma.zeros(shape, dtype = 'S1' if typecode == 'c' else typecode, fill_value = kwds.get('fill_value', None))\n\n result=result.view(type = subtype)\n result._ncattrs = ()\n result.typecode = lambda: typecode\n result.dimensions = tuple(dimensions)\n for k,v in kwds.items():\n setattr(result,k,v)\n return result", "def __init__(self, *args):\n self.types = tuple([trait_from(arg) for arg in args])\n self.fast_validate = (9, self.types)", "def _new_instance(cls, data_list, meta=None, common_axis=None):\n return cls(data_list, meta=meta, common_axis=common_axis)", "def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherArray", "def make(self, atype, **kwargs):\n obj = self.api.get_type(f\"VSO:{atype}\")\n return obj(**kwargs)", "def build_user(data: Dict[Any, Any]) -> User:\n return User(**data)", "def from_instance_type(cls, instance_type_arg, entrypoint=\"*\"):\n if instance_type_arg is None:\n return cls(None)\n if isinstance(instance_type_arg, basestring):\n # By default, all entry points (\"*\") should use this instance type\n return cls({entrypoint: {\"instanceType\": instance_type_arg}})\n if isinstance(instance_type_arg, dict):\n # instance_type is a map of entry point to instance type\n return cls({fn: {\"instanceType\": fn_inst} for fn, fn_inst in instance_type_arg.items()})\n raise DXError('Expected instance_type field to be either a string or a dict')", "def _doc_create(type, data):\n doc = dict(data)\n doc.update({'model_type': type})\n return doc", "def safe_create_data(self, typename, init_func):\r\n # Basically just an alias for 
readability.\r\n self.get_data(typename, init_func)", "def create(cls, *args):\n c = cls({})\n c.apply(*args)\n return c", "def from_bytes(data: Union[int, bytes]) -> \"TypeInfo\":\n # pylint: disable=attribute-defined-outside-init\n\n if isinstance(data, bytes):\n data = int.from_bytes(data, 'big')\n # pylint: disable=protected-access\n if data == 0b11111111:\n raise NotImplementedError('Long identifier types are not yet '\n 'implemented')\n cls_hint = (data & 0b11000000) >> 6\n pc_hint = (data & 0b00100000) >> 5\n value = data & 0b00011111\n\n if cls_hint == 0b00:\n cls = TypeInfo.UNIVERSAL\n elif cls_hint == 0b01:\n cls = TypeInfo.APPLICATION\n elif cls_hint == 0b10:\n cls = TypeInfo.CONTEXT\n elif cls_hint == 0b11:\n cls = TypeInfo.PRIVATE\n else:\n pass # Impossible case (2 bits can only have 4 combinations).\n\n priv_const = TypeInfo.CONSTRUCTED if pc_hint else TypeInfo.PRIMITIVE\n\n instance = TypeInfo(cls, priv_const, value)\n instance._raw_value = data\n return instance", "def make_daysetting_from_data(data):\r\n factory = {\r\n \"color\": ColorType,\r\n \"scalar\": ScalarType\r\n }\r\n if data[\"type\"] not in factory:\r\n raise Exception(\"Unkown setting type: {}\".format(data[\"type\"]))\r\n\r\n try:\r\n instance = factory[data[\"type\"]](data)\r\n except Exception:\r\n print(\"Exception occured while parsing\", data)\r\n raise\r\n\r\n if data:\r\n raise Exception(\"Unparsed data left in plugin setting: {}\".format(data))\r\n return instance", "def __init__(self, shape, dtype='float32'):\n if not isinstance(shape, (tuple, list)):\n raise TypeError('shape must be a tuple or list: %s' % str(shape))\n self._type_shape = loom.TypeShape(dtype, shape)", "def create(*args):", "def __call__(self, *args):\n return TypeCall(self, args)", "def test_generate_data_model():\n params = dict(name=\"test\", type_=str, is_required=True)\n\n data_model = DataModel(\"test\", [Attribute(**params)])\n\n assert generate_data_model(\"test\", {\"test\": \"str\"}) == data_model", "def factory(*args):\n\n def wrapper(dataset):\n return Factory(dataset, *args)\n\n return wrapper", "def __init__(self, data=(1.0, numpy.array((0., 0., 0.), 'f'))):\n try:\n self.real = float(data[0])\n self.pure = numpy.array((data[1][0], data[1][1], data[1][2]), 'f')\n except:\n raise ValueError(\"1Arguments must be (c,(x,y,z))\")\n if len(self.pure) != 3:\n raise ValueError(\"2Arguments must be (c,(x,y,z))\")", "def make_instance(self, data, **kwargs):\n instance = self.instance or self.get_instance(data)\n if instance is not None:\n for key, value in iteritems(data):\n setattr(instance, key, value)\n return instance\n kwargs, association_attrs = self._split_model_kwargs_association(data)\n instance = self.opts.model(**kwargs)\n for attr, value in iteritems(association_attrs):\n setattr(instance, attr, value)\n return instance", "def create(self, **kargs):\n return self(**kargs)", "def __init__(\n self,\n name: str,\n source: str,\n value: np.ndarray,\n time: np.ndarray,\n units: str,\n data_type: str,\n metadata: Dict[str, np.ndarray] = None,\n ):\n super().__init__(name, source, value, time, units)\n self.data_type = data_type\n if metadata:\n self.metadata = metadata", "def safe_create_data(self, typename, init_func):\n # Basically just an alias for readability.\n return self.get_data(typename, init_func)", "def make_object(self, data, **kwargs):\n if data is None:\n raise ValidationError(\"No data was provided\")\n return Artist(**data)", "def build(self, data: dict):", "def from_data(cls, data):\n self = 
object.__new__(cls)\n self.id = parse_id(data)\n self._set_icon(data)\n self.bot = parse_bot(data)\n self.description = parse_description(data)\n self.name = parse_name(data)\n return self", "def init_data(world, data_type):\n\n # generate a number of events, the max number allowed is the whole area of the world, so if world is [-10;10]\n # then max number should be 21*21\n number_of_events = DataGenerator.generate_number_of_events((world.grid_size*2 + 1) ** 2)\n\n unavailable_positions = set() # set is used to store the already in use positions to ensure fast 'contains'\n\n for i in range(0, number_of_events, 1):\n # generate available position for the new event\n x, y = DataGenerator.generate_available_position(unavailable_positions, world.grid_size*2)\n x -= world.grid_size\n y -= world.grid_size\n\n # generate new object of type 'data_type', in this case this is object of type Event\n event = data_type(i, DataGenerator.generate_event_tickets())\n # register the event\n world.register_event(event, x, y)", "def __init__(self, raw_arg: Dict):\n self.name = raw_arg.get(\"name\")\n self.description = raw_arg.get(\"description\")\n self.type = TypeDefer(raw_arg.get(\"type\")) if raw_arg.get(\"type\") is not None else None\n self.default_value = raw_arg.get(\"defaultValue\")", "def __init__(self, name, data_type=\"Vertex\"):\r\n self.name = name\r\n self.data_type = data_type", "def pack(*args):\n result = np.empty(len(args), dtype=object)\n for i, arg in enumerate(args):\n result[i] = arg\n return result", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def __init__(self, type, value,):\n self.type = type\n self.value = value", "def __init__(self, type, value):\n self.type = type\n self.value = value", "def __init__(self, type, value):\n self.type = type\n self.value = value", "def __init__(self, data: Any, event_type: int):\n wx.PyEvent.__init__(self)\n self.SetEventType(event_type)\n self.data = data", "def __init__(\n self,\n data_type,\n name,\n index,\n has_default,\n default=_NO_DEFAULT,\n order=None,\n doc=None,\n other_props=None\n ):\n if (not isinstance(name, _str)) or (not name):\n raise SchemaParseException('Invalid record field name: %r.' % name)\n if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):\n raise SchemaParseException('Invalid record field order: %r.' 
% order)\n\n # All properties of this record field:\n self._props = {}\n\n self._has_default = has_default\n if other_props:\n self._props.update(other_props)\n\n self._index = index\n self._type = self._props['type'] = data_type\n self._name = self._props['name'] = name\n\n if has_default:\n self._props['default'] = default\n\n if order is not None:\n self._props['order'] = order\n\n if doc is not None:\n self._props['doc'] = doc", "def __init__(self, data_model: DataModel) -> None:\n\n # DataModel\n self._data_model = data_model\n\n # Dict[ParameterName, Any]\n self._param_to_value: Dict[ParameterName, Any] = {}\n\n # Dict[ParameterName, Dict[ParameterName, Any]]\n self._numbered_objects: Dict[ParameterName, Dict[ParameterName, Any]] = {}\n # If adding a PLMN object, then you would set something like\n # self._numbered_objects['PLMN_1'] = {'PLMN_1_ENABLED': True}", "def __init__(self, data_type=None):\n super(EventData, self).__init__()\n self._event_data_stream_identifier = None\n self._event_data_stream_row_identifier = None\n self.data_type = data_type\n self.offset = None\n self.parser = None\n self.query = None", "def __init__(self, xy, **kwds):\n self.data = xy", "def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\n \"construct_array_type does not support arguments\")\n return XndframesArray", "def _make_seq_container(\n klass, data, moltype=None, label_to_name=None, info=None, source=None, **kw\n):\n if moltype is not None:\n moltype = get_moltype(moltype)\n\n info = info or {}\n for other_kw in (\"constructor_kw\", \"kw\"):\n other_kw = kw.pop(other_kw, None) or {}\n kw.update(other_kw)\n assert isinstance(info, dict), \"info must be a dict\"\n source = source or info.get(\"source\", \"unknown\")\n info[\"source\"] = str(source)\n\n return klass(\n data=data, moltype=moltype, label_to_name=label_to_name, info=info, **kw\n )", "def __init__(self, _id, **data):\n\n default = {\n '_id' : _id, # required\n 'name' : u'', # human readable name\n 'description' : u'', # human readable description\n 'contains' : [], # list of content type it is allowed to contain or 'all', empty means none\n 'fields' : [('title', 'string')], # list of fields and types this type understands\n 'required_fields' : ['title'], # list of required fields\n 'reprs' : ['default', 'contents'], # list of representations\n 'default_repr' : 'default', # default representation\n 'mgr' : None, # instance of the responsible content manager \n 'cls' : None, # type implementation\n }\n\n d = copy.copy(default)\n d.update(data)\n self.update(d)", "def __init__(self, data):\n self.data = data\n return", "def from_data(cls, data):\n # Validation\n if data.get(\"_Serializable_classname\") != cls.__name__:\n return None\n del data[\"_Serializable_classname\"]\n if data.get(\"_Serializable_version\") is not None:\n del data[\"_Serializable_version\"]\n\n this = cls(None)\n this.__dict__.update(data)\n return this", "def from_json(data: Dict[str, Any]) -> 'ModelData':\n return ModelData(data.get(\"name\"),\n data.get(\"type\"),\n data.get(\"value\"),\n data.get(\"units\"))", "def __init__(self, *args: Union[List[AtomKey], DKT], **kwargs: str) -> None:\n ...", "def __init__(self,given_type):\n self.given_type=given_type", "def __init__(self, raw_type: type):\n self.raw_type = raw_type\n self.name = raw_type.__name__\n self.qualname = raw_type.__qualname__\n self.module = raw_type.__module__\n self.full_name = TypeInfo.to_full_name(raw_type)\n self.hash = hash(self.full_name)\n self.is_abstract = 
inspect.isabstract(raw_type)\n # TODO(fk) store more information on attributes\n self.instance_attributes: OrderedSet[str] = OrderedSet()\n self.attributes: OrderedSet[str] = OrderedSet()\n\n # TODO(fk) properly implement generics!\n # For now we just store the number of generic parameters for set, dict and list.\n self.num_hardcoded_generic_parameters: int | None = (\n 2 if raw_type is dict else 1 if raw_type in (set, list) else None\n )", "def __init__(\n self,\n name,\n data_context=None,\n data_asset_type=None,\n batch_kwargs_generators=None,\n **kwargs,\n ) -> None:\n self._data_context = data_context\n self._name = name\n # deprecated-v0.7.11\n if isinstance(data_asset_type, str):\n warnings.warn(\n \"String-only configuration for data_asset_type is deprecated as of v0.7.11. \"\n \"As support will be removed in v0.16, please use module_name and class_name instead.\",\n DeprecationWarning,\n )\n self._data_asset_type = data_asset_type\n self._datasource_config = kwargs\n self._batch_kwargs_generators: dict = {}\n\n self._datasource_config[\"data_asset_type\"] = data_asset_type\n if batch_kwargs_generators is not None:\n self._datasource_config[\"batch_kwargs_generators\"] = batch_kwargs_generators\n\n # Chetan - 20221103 - This attribute is meant to represent the config args used to instantiate the object (before ${VARIABLE} substitution).\n # While downstream logic should override this value, we default to `self._datasource_config` as a backup.\n # This is to be removed once substitution logic is migrated from the context to the individual object level.\n self._raw_config = self._datasource_config", "def test_constructor(cls, data):\n do_constructor_test(cls, data)", "def __init__(\n self,\n type_: Type[T],\n *,\n type_is_generic_self: bool = False,\n coerce: bool = False,\n compcoef: Optional[float] = None,\n inheritable: bool = True,\n simpledelta: bool = True,\n merge_fn: MergeFunction = default_field_merge,\n ephemeral: bool = False,\n weak_ref: bool = False,\n allow_ddl_set: bool = False,\n describe_visibility: DescribeVisibilityPolicy = (\n DescribeVisibilityPolicy.SHOW_IF_EXPLICIT),\n ddl_identity: bool = False,\n aux_cmd_data: bool = False,\n special_ddl_syntax: bool = False,\n reflection_method: ReflectionMethod = ReflectionMethod.REGULAR,\n reflection_proxy: Optional[Tuple[str, str]] = None,\n name: Optional[str] = None,\n reflection_name: Optional[str] = None,\n patch_level: int = -1,\n **kwargs: Any,\n ) -> None:\n if not isinstance(type_, type):\n raise ValueError(f'{type_!r} is not a type')\n\n self.type = type_\n self.type_is_generic_self = type_is_generic_self\n self.coerce = coerce\n self.allow_ddl_set = allow_ddl_set\n self.ddl_identity = ddl_identity\n self.aux_cmd_data = aux_cmd_data\n self.special_ddl_syntax = special_ddl_syntax\n self.describe_visibility = describe_visibility\n\n self.compcoef = compcoef\n self.inheritable = inheritable\n self.simpledelta = simpledelta\n self.weak_ref = weak_ref\n self.reflection_method = reflection_method\n self.reflection_proxy = reflection_proxy\n self.is_reducible = issubclass(type_, s_abc.Reducible)\n self.patch_level = patch_level\n\n if name is not None:\n self.name = name\n if reflection_name is not None:\n self.sname = reflection_name\n\n if (\n merge_fn is default_field_merge\n and callable(\n type_merge_fn := getattr(self.type, 'merge_values', None)\n )\n ):\n self.merge_fn = type_merge_fn\n else:\n self.merge_fn = merge_fn\n\n self.ephemeral = ephemeral", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n 
kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def make_object(self, data, **kwargs):\n if not data:\n return None\n return Note(\n title=data['title'],\n content=data['content'],\n )", "def __init__(self, command=None, data_length=0, data=[]):\n if command is not None:\n self.command = command\n self.data_length = data_length\n self.data = data\n self.encode()\n else:\n self.message_length = 0\n self.command = 0\n self.data_length = 0\n self.data = []\n self.string = \"\"", "def FromData(cls, data):\n if not data:\n return None\n\n attribute_name = data['attribute_name']\n parameter_name = data['parameter_name']\n help_text = data['help']\n completion_id_field = data.get('completion_id_field', None)\n completion_request_params_list = data.get('completion_request_params', [])\n completion_request_params = {\n param.get('fieldName'): param.get('value')\n for param in completion_request_params_list\n }\n\n # Add property fallthroughs.\n fallthroughs = []\n prop = properties.FromString(data.get('property', ''))\n if prop:\n fallthroughs.append(deps_lib.PropertyFallthrough(prop))\n default_config = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name)\n if default_config:\n fallthroughs += [\n f for f in default_config.fallthroughs if f not in fallthroughs]\n # Add fallthroughs from python hooks.\n fallthrough_data = data.get('fallthroughs', [])\n fallthroughs_from_hook = [\n deps_lib.Fallthrough(util.Hook.FromPath(f['hook']), hint=f['hint'])\n for f in fallthrough_data\n ]\n fallthroughs += fallthroughs_from_hook\n return cls(\n name=attribute_name,\n help_text=help_text,\n fallthroughs=fallthroughs,\n completion_id_field=completion_id_field,\n completion_request_params=completion_request_params,\n parameter_name=parameter_name)", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def __init__(self, msg_id=0, xtd=0, rtr=0, ndata=0, data=() ):\r\n self.msg_id = msg_id\r\n self.rtr = rtr\r\n self.xtd = xtd\r\n self.ndata = ndata\r\n self.data = data # tuple with length 0..8\r\n self.timestamp = time.time() # Timestamp of object creation\r", "def from_dict(data_class: Type[T], data: Data, config: Optional[Config] = None) -> T:\n init_values: MutableMapping[str, Any] = {}\n post_init_values: MutableMapping[str, Any] = {}\n config = config or Config()\n try:\n data_class_hints = cache(get_type_hints)(data_class, localns=config.hashable_forward_references)\n except NameError as error:\n raise ForwardReferenceError(str(error))\n data_class_fields = cache(get_fields)(data_class)\n if config.strict:\n extra_fields = set(data.keys()) - {f.name for f in data_class_fields}\n if extra_fields:\n raise UnexpectedDataError(keys=extra_fields)\n for field in data_class_fields:\n field_type = data_class_hints[field.name]\n if field.name in data:\n try:\n field_data = data[field.name]\n value = _build_value(type_=field_type, data=field_data, config=config)\n except DaciteFieldError as error:\n error.update_path(field.name)\n raise\n if config.check_types and not is_instance(value, field_type):\n raise WrongTypeError(field_path=field.name, field_type=field_type, value=value)\n else:\n try:\n value = get_default_value_for_field(field, field_type)\n except DefaultValueNotFoundError:\n if not field.init:\n continue\n raise 
MissingValueError(field.name)\n if field.init:\n init_values[field.name] = value\n elif not is_frozen(data_class):\n post_init_values[field.name] = value\n instance = data_class(**init_values)\n for key, value in post_init_values.items():\n setattr(instance, key, value)\n return instance", "def build(cls, **kwargs):\n new_object = cls()\n fields = get_fields(cls)\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(new_object, name, value)\n \n return new_object", "def make_object(obj, kwargs):\n return obj(**kwargs)", "def __init__(self, data=None):\n self.data = data", "def __init__(self, data_type, other_props=None):\n if data_type not in PRIMITIVE_TYPES:\n raise AvroException('%r is not a valid primitive type.' % data_type)\n super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)", "def __init__(self, type, value):\r\n self._type = type\r\n self._value = value", "def _create_dataset(self, *data):\n # Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def __init__(self, data, chunksize, axis, shape, **kwargs):\n\n if shape is None:\n msg = 'A {} from a generating function requires a shape.'\n raise ValueError(msg.format('Producer'))\n\n super().__init__(data, chunksize, axis, **kwargs)\n self._shape = tuple(shape)", "def _ConstructType(self, type_name, type_contents, filepath, require_guid):\n\n description = ''\n parents = None\n local_field_names = None\n opt_local_field_names = None\n is_abstract = False\n allow_undefined_fields = False\n is_canonical = False\n guid = None\n\n expected_keys = set([\n 'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'guid',\n 'is_canonical', 'allow_undefined_fields'\n ])\n\n if 'description' in type_contents:\n description = type_contents['description']\n if 'implements' in type_contents:\n parents = type_contents['implements']\n if 'uses' in type_contents:\n local_field_names = type_contents['uses']\n if 'opt_uses' in type_contents:\n opt_local_field_names = type_contents['opt_uses']\n if 'is_abstract' in type_contents:\n is_abstract = type_contents['is_abstract']\n if 'allow_undefined_fields' in type_contents:\n allow_undefined_fields = type_contents['allow_undefined_fields']\n if 'is_canonical' in type_contents:\n is_canonical = type_contents['is_canonical']\n if 'guid' in type_contents:\n guid = type_contents['guid']\n\n # Generate tuples to represent each field\n fq_lfn = []\n if local_field_names:\n self._ConstructField(local_field_names, False, fq_lfn)\n if opt_local_field_names:\n self._ConstructField(opt_local_field_names, True, fq_lfn)\n\n entity_type = EntityType(\n filepath=filepath,\n typename=type_name,\n description=description,\n parents=parents,\n local_field_tuples=fq_lfn,\n is_abstract=is_abstract,\n allow_undefined_fields=allow_undefined_fields,\n inherited_fields_expanded=False,\n is_canonical=is_canonical,\n guid=guid,\n require_guid=require_guid,\n namespace=self.local_namespace)\n\n # Add errors to type if there's anything extra in the block. We add to the\n # entity type because an extra key here is likely a typo in a real key name\n # that would result in information being lost from the type.\n for key in type_contents:\n if key not in expected_keys:\n entity_type.AddFinding(\n findings_lib.UnrecognizedKeyError(key, entity_type.file_context))\n\n return entity_type" ]
[ "0.65806115", "0.6494723", "0.6493428", "0.6436474", "0.63050216", "0.61820936", "0.6163499", "0.61415094", "0.60745573", "0.6070686", "0.6051727", "0.60087764", "0.6002497", "0.5975732", "0.5947289", "0.58672804", "0.5859592", "0.58576393", "0.5827365", "0.5820931", "0.5812757", "0.58077604", "0.57782376", "0.57708186", "0.57500553", "0.5722772", "0.5706164", "0.5698229", "0.5673332", "0.5646855", "0.56355315", "0.5630463", "0.56230175", "0.5600061", "0.5599376", "0.5598776", "0.5592992", "0.55866843", "0.5532948", "0.5509131", "0.54956526", "0.54845774", "0.54640394", "0.54421836", "0.5426929", "0.54009455", "0.5387296", "0.5372426", "0.5368559", "0.5363305", "0.53574634", "0.5355723", "0.5354991", "0.5341363", "0.53405476", "0.5332795", "0.5323408", "0.5323222", "0.53215307", "0.5320995", "0.53209263", "0.5316719", "0.53140116", "0.5311106", "0.5304228", "0.53032506", "0.52980447", "0.52980447", "0.5284396", "0.52813274", "0.5275099", "0.5270877", "0.52667844", "0.526459", "0.5256874", "0.5255592", "0.5253775", "0.52520555", "0.5239939", "0.52382827", "0.5221848", "0.52195936", "0.521505", "0.52085", "0.5207051", "0.5201522", "0.51930475", "0.51874244", "0.5186573", "0.51861674", "0.51852995", "0.51841426", "0.51761055", "0.5176039", "0.5168704", "0.51673746", "0.5165471", "0.5161681", "0.51613915", "0.5148217" ]
0.7436465
0
fmap(func, obj) creates a copy of obj with func applied to its contents. Override by defining obj.__fmap__(func).
def fmap(func, obj): if _coconut.hasattr(obj, "__fmap__"): return obj.__fmap__(func) if obj.__class__.__module__ == "numpy": from numpy import vectorize return vectorize(func)(obj) return _coconut_makedata(obj.__class__, *(_coconut_starmap(func, obj.items()) if _coconut.isinstance(obj, _coconut.abc.Mapping) else _coconut_map(func, obj)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fmap(function, descriptor):\n return MappedDescriptor(descriptor, function)", "def map(self, func):\n return _(map(func, self._))", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def fmap(self, func):\n @wraps(self.v)\n def state_mapper(state, func=func, runner=self):\n result, new_state = runner(state)\n return (func(result), state)\n\n return State(state_mapper)", "def imap(self, func: Callable[[T], V]) -> '_[V]':\n return _(map(func, self.array))", "def map(self, func):\n if self.is_right(): return self.right.map(func)\n if self.is_left(): return self.left.map(func)", "def map(self, function):\n pass", "def __call__(self, func, *args, **kwds):\r\n results = self.map(func, *args, **kwds)\r\n if results:\r\n return results[0]", "def methdispatch(func): \n dispatcher = singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper", "def methdispatch(func):\n # ref:\n # https://stackoverflow.com/questions/24601722/how-can-i-use-functools-singledispatch-with-instance-methods\n dispatcher = singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[1].__class__)(*args, **kw)\n wrapper.register = dispatcher.register\n update_wrapper(wrapper, func)\n return wrapper", "def map(self, func, inplace=True):\n # only leaves have to be adapted\n new_leaves = [func(l) for l in self.leaves]\n if inplace:\n self.leaves = new_leaves\n return self\n else:\n return Structure(struct=self.struct, leaves=new_leaves)", "def pfmap(func, workers=8):\n return fmap(func)", "def transform(self, func):\n return func(self)", "def imap_c(func):\n return functools.partial(imap, func)", "def map(self, func):\n return List(map(func, self))", "def map_collection(func, collection):\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection", "def map(self, obj):\n if isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)", "def map(self, fn, inv_fn):\r\n\t\treturn MapProjectedList(self, [fn], [inv_fn])", "def lift(func: Callable) -> Callable:\n return lambda f: compose2(func, f)", "def apply(native_object, function, *args, **kwargs):\n result = unwrap(function(wrap(native_object), *args, **kwargs))\n return result", "def __call__(self, f):\r\n return self.apply(f, None)", "def _map_fn(self):\n raise NotImplementedError", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", "def flat_map(fn, collection):\n return chain.from_iterable(map(fn, collection))", "def map(self, f):\n self.append(Mapper(f))\n return self", "def Map(\r\n data,\r\n map_fct: Callable,\r\n info: List[Dict] = None,\r\n lazy: bool = True,\r\n workers: int = 1,\r\n buffer_len: int = 3,\r\n *arg: list,\r\n **kwargs: Dict\r\n) -> Union[MapAbstract, DataAbstract, np.ndarray, list]:\r\n\r\n if lazy:\r\n return MapAbstract(data, map_fct, *arg, info=info, **kwargs)\r\n else:\r\n return DataAbstract(\r\n MapAbstract(data, map_fct, *arg, info=info, **kwargs),\r\n workers=workers,\r\n buffer_len=buffer_len,\r\n )[:]", "def mapf( f, C 
):\n return (f(x) for x in C)", "def map(self, mapper):\n def _map(iterator):\n return mapper(next(iterator))\n return self.__class__(self, _map)", "def map_method(self, filter_func, method_name, *args, **kwds):\r\n return self.map(filter_func, self._call_extension_method,\r\n method_name, *args, **kwds)", "def singledispatchmethod(func):\n dispatcher = functools.singledispatch(func)\n def wrapper(*args, **kw):\n return dispatcher.dispatch(args[0].__class__).__func__(*args, **kw)\n wrapper.register = dispatcher.register\n functools.update_wrapper(wrapper, func)\n return wrapper", "def _map_over_compound_operators(f):\n @functools.wraps(f)\n def out(qobj):\n # To avoid circular dependencies\n from .cy.qobjevo import QobjEvo\n if isinstance(qobj, QobjEvo):\n return qobj.linear_map(f, _skip_check=True)\n if not isinstance(qobj, Qobj):\n raise TypeError(\"expected a quantum object\")\n return f(qobj)\n return out", "def foreach(function):\n return partial(map, function)", "def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)", "def map(self, f):\n if self.is_empty():\n pass\n else:\n items = []\n items.append(f(self._first))\n map(f._rest)\n new_lst = LinkedListRec(items)", "def map(self, fn, *iterables, **kwargs):\n fn = self._prepare_fn(fn)\n return self._self.map(fn, *iterables, **kwargs)", "def map_structure(fn: Callable[[T], R], obj: Collection[T]) ->Collection[R]:\n if hasattr(obj, '--no-map--'):\n return fn(obj)\n if isinstance(obj, list):\n return [map_structure(fn, x) for x in obj]\n if isinstance(obj, tuple):\n if isinstance(obj, torch.Size):\n return fn(obj)\n if hasattr(obj, '_fields'):\n return type(obj)(*[map_structure(fn, x) for x in obj])\n else:\n return tuple(map_structure(fn, x) for x in obj)\n if isinstance(obj, dict):\n return {k: map_structure(fn, v) for k, v in obj.items()}\n if isinstance(obj, set):\n return {map_structure(fn, x) for x in obj}\n return fn(obj)", "def map(self, func: Callable[[T], V]) -> 'List[V]':\n return [func(v) for v in self.array]", "def mlift(func):\n return compose(unit, func)", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def simple_map_2(f, l):\n # Same as above without comprehension:\n mapped_l = []\n for item in l:\n mapped_l.append( f(item) ) # the extra blanks are just for readability\n return mapped_l", "def lift(cls, func):\n raise NotImplementedError", "def makemethod(function):\n def magicmethod(self, *args, **kwargs):\n return MagicField(lambda s: function(*args, **kwargs)(self._compute(s)))\n return magicmethod", "def recursive_map(func, data):\n\n def recurse(item):\n return recursive_map(func, item)\n\n items_mapped = map_collection(recurse, data)\n return func(items_mapped)", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def virtual(func: \"callable\"):\n return func", "def dot(fn):\n def access(obj):\n return getattr(obj, fn)\n return access", "def pipe(self, func: Callable, *args, **kwargs) -> Any:\n return func(self, *args, **kwargs)", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def do(func, pure=False):\n def _dfunc(*args, **kwargs):\n return applyfunc(func, args, kwargs, pure=pure)\n\n with ignoring(AttributeError):\n _dfunc = wraps(func)(_dfunc)\n\n return _dfunc", 
"def map(iterable, function):\n for x in iterable:\n yield function(x)", "def fl_call_object_callback(ptr_flobject):\n _fl_call_object_callback = library.cfuncproto(\n library.load_so_libforms(), \"fl_call_object_callback\",\\\n None, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"void fl_call_object_callback(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_call_object_callback(ptr_flobject)", "def map(self, f: Callable[[Any], Any]) -> RecursiveList:\n # If empty, return empty list\n if self.is_empty():\n return RecursiveList([])\n else:\n # Apply f to the first element and make a new list to return\n rl = RecursiveList([f(self._first)])\n # Map the rest of the list and set it to rl's _rest\n rl._rest = self._rest.map(f) # recursive call\n return rl", "def apply(cls, func):\n raise NotImplementedError", "def wrapper(fn):\n if name is None:\n name_ = fn.__name__\n else:\n name_ = name\n original_method = getattr(cls,name_,default)\n new_method = fn(original_method)\n setattr(cls,name_,new_method)\n return fn", "def mapped(self, *args, **kwargs): # real signature unknown\r\n pass", "def map(pointer, objfile=\"\"):\n ei_class, ehdr = get_ehdr(pointer)\n return map_inner(ei_class, ehdr, objfile)", "def transform(self, f):\n\n return f(self)", "def map_values_c(fun):\n return partial(map_values, fun)", "def maybe_call(obj, *args, **kwargs):\n if callable(obj):\n return obj(*args, **kwargs)\n return obj", "def apply(instance, **methods: Callable):\n\n def decorator(method, func):\n @functools.wraps(method)\n def wrapper(*args, **kwargs):\n return func(method(*args, **kwargs))\n\n return staticmethod(wrapper)\n\n new_methods = {method: decorator(getattr(instance, method), func) for method, func in methods.items()}\n proxy = type('Apply', (Proxy,), new_methods)\n return proxy(instance)", "def map(z):\n pass", "def mapcat(f):\n return compose(map(f), cat)", "def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)", "def map_data(self, obj: object):\n pass", "def mapr(\n f: Callable[[Any], Any],\n collection: Sequence[Any]) -> List[Any]:\n if len(collection) == 0:\n return []\n return mapr(f, collection[:-1]) + [f(collection[-1])]", "def mapper(fun: Callable[[str], Pin], /) -> None:", "def fl_set_object_posthandler(ptr_flobject, pyfn_HandlePtr):\n #FL_HANDLEPTR = cty.CFUNCTYPE(cty.c_int, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_int, xfdata.FL_Coord, xfdata.FL_Coord, cty.c_int, cty.c_void_p)\n _fl_set_object_posthandler = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_posthandler\",\n xfdata.FL_HANDLEPTR, [cty.POINTER(xfdata.FL_OBJECT),\n xfdata.FL_HANDLEPTR],\\\n \"\"\"FL_HANDLEPTR fl_set_object_posthandler(FL_OBJECT * ob,\n FL_HANDLEPTR post)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.verify_function_type(pyfn_HandlePtr)\n cfn_HandlePtr = xfdata.FL_HANDLEPTR(pyfn_HandlePtr)\n library.keep_cfunc_refs(cfn_HandlePtr, pyfn_HandlePtr)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_set_object_posthandler(ptr_flobject, cfn_HandlePtr)\n return retval", "def mapr( f, collection ):\n if len(collection) == 0: return []\n return mapr(f, collection[:-1]) + [ f(collection[-1]) ]", "def wrapper(*args, **kwargs):\r\n return lambda: func(*args, **kwargs)", "def func(cls):\n return cls.get_wrapper()(cls.callable)", "def walk(rv, F):\n args = getattr(rv, 'args', None)\n if args is not None:\n if args:\n newargs = 
tuple([walk(a, F) for a in args])\n if args != newargs:\n rv = rv.func(*newargs)\n if simultaneous:\n # if rv is something that was already\n # matched (that was changed) then skip\n # applying F again\n for i, e in enumerate(args):\n if rv == e and e != newargs[i]:\n return rv\n rv = F(rv)\n return rv", "def dispatch(self):\n return self._wrapper(self._dispatch_fun)", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def map(self, func):\n execute = ExecutorMap(func)\n self._funcs.append(execute)\n return self", "def inline_map_reduce(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.inline_map_reduce(m.collection, *args, **kwargs)", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def map_structure_zip(fn: Callable[..., R], objs: Sequence[Collection[T]]) ->Collection[R]:\n obj = objs[0]\n if hasattr(obj, '--no-map--'):\n return fn(*objs)\n if isinstance(obj, list):\n return [map_structure_zip(fn, xs) for xs in zip(*objs)]\n if isinstance(obj, tuple):\n if isinstance(obj, torch.Size):\n return fn(obj)\n if hasattr(obj, '_fields'):\n return type(obj)(*[map_structure_zip(fn, xs) for xs in zip(*objs)])\n else:\n return tuple(map_structure_zip(fn, xs) for xs in zip(*objs))\n if isinstance(obj, dict):\n return {k: map_structure_zip(fn, [o[k] for o in objs]) for k in obj.keys()}\n if isinstance(obj, set):\n return {map_structure_zip(fn, xs) for xs in zip(*objs)}\n return fn(*objs)", "def _curry_callable(obj, args, kwargs):\n if isinstance(obj, types.MethodType):\n return (_invokeMember, (obj.im_self, obj.im_func.__name__) + args, kwargs)\n elif isinstance(obj, types.BuiltinMethodType):\n if not obj.__self__:\n return (obj, args, kwargs)\n else:\n return (_invokeMember, (obj.__self__, obj.__name__) + args, kwargs)\n elif isinstance(obj, types.ObjectType) and hasattr(obj, \"__call__\"):\n return (obj, args, kwargs)\n elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,\n types.ClassType, types.UnboundMethodType)):\n return (obj, args, kwargs)\n else:\n raise ValueError(\"obj must be callable\")", "def apply(func: Callable):\n def decorator(wrapped_func: Callable):\n def wrapper(*args, **kwargs):\n return func(\n wrapped_func(*args, **kwargs)\n )\n return wrapper\n return decorator", "def call(obj, /, *args, **kwargs):\n return obj(*args, **kwargs)", "def _call_real(vecObj):\n res = vecObj.real()\n return res", "def to_function(callable_object):\n if not ismethod(callable_object):\n return callable_object\n\n @wraps(callable_object)\n def decorated_method(*args, **kwargs):\n return callable_object(*args, **kwargs)\n return decorated_method", "def dummy_wrapper(func):\n return func", "def dummy_callback(obj):\n pass", "def partialmethod(func, arg):\n return lambda *args, **kwargs: func(arg, *args, **kwargs)", "def fl_set_object_callback(ptr_flobject, pyfn_CallbackPtr, numdata):\n #FL_CALLBACKPTR = cty.CFUNCTYPE(None, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_long)\n _fl_set_object_callback = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_callback\",\\\n xfdata.FL_CALLBACKPTR, [cty.POINTER(xfdata.FL_OBJECT),\n xfdata.FL_CALLBACKPTR, cty.c_long],\n \"\"\"FL_CALLBACKPTR fl_set_object_callback(FL_OBJECT * obj,\\\n FL_CALLBACKPTR callback, long int argument)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n l_numdata = library.convert_to_longc(numdata)\n library.verify_function_type(pyfn_CallbackPtr)\n cfn_CallbackPtr = xfdata.FL_CALLBACKPTR(pyfn_CallbackPtr)\n 
library.keep_cfunc_refs(cfn_CallbackPtr, pyfn_CallbackPtr)\n library.keep_elem_refs(ptr_flobject, numdata, l_numdata)\n retval = _fl_set_object_callback(ptr_flobject, cfn_CallbackPtr, \\\n l_numdata)\n return retval", "def call_orig_func(func, *args, **kwargs):\n return func(*args, **kwargs)", "def list_map(data, function):\n return list(map(function, data))", "def rapplicator(default_f):\n method_name = default_f.__name__\n def apply(f, *args, **kwds):\n if isinstance(f, rfunc):\n return getattr(f, method_name)(*args, **kwds)\n elif isinstance(f, functools.partial):\n if f.keywords:\n new_keywords = copy.copy(f.keywords)\n new_keywords.update(kwds)\n return apply(f.func, *(f.args + args), **new_keywords)\n else:\n return apply(f.func, *(f.args + args))\n else:\n return default_f(f, *args, **kwds)\n return apply", "def map_reduce(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.map_reduce(m.collection, *args, **kwargs)", "def walk_values_rec(f, coll):\n if is_mapping(coll):\n return f(walk_values(walk_values_rec(f), coll))\n elif is_list(coll):\n return f(list(map(walk_values_rec(f), coll)))\n else:\n return f(coll)", "def __call__(self, fn: Callable) -> Callable:\n\n @functools.wraps(fn)\n def wrapper(instance, target_obj):\n # Wrap function call within a UoW\n with UnitOfWork():\n fn(instance, target_obj)\n\n setattr(wrapper, \"_target_cls\", self._target_cls)\n return wrapper", "def singledispatch2(cls, f = None):\n\n # classic way of allowing args to a decorator\n if f is None:\n return lambda f: singledispatch2(cls, f)\n\n # initially registers func for object, so need to change to pd.DataFrame\n dispatch_func = singledispatch(f)\n if isinstance(cls, tuple):\n for c in cls: dispatch_func.register(c, f)\n else:\n dispatch_func.register(cls, f)\n # then, set the default object dispatcher to create a pipe\n register_pipe(dispatch_func, object)\n\n # register dispatcher for Call, and NoArgs\n pipe_call(dispatch_func)\n pipe_no_args(dispatch_func)\n\n @wraps(dispatch_func)\n def wrapper(*args, **kwargs):\n strip_args = map(strip_symbolic, args)\n strip_kwargs = {k: strip_symbolic(v) for k,v in kwargs.items()}\n\n if not args:\n return dispatch_func(NoArgs(), **strip_kwargs)\n\n return dispatch_func(*strip_args, **strip_kwargs)\n\n return wrapper", "def _imp_proxy_method(self, name, func):\n # We just return the original function\n return func", "def map_my(self, func: Callable[[Union[float, int]], int]) -> None:\n def list_func(lst: List[valueType]) -> List[valueType]:\n \"\"\"\n To apply the function/operation defined by users to every item in the list.\n :param lst: A list object like [element1, [element2, element3], element4].\n :return: A list that store the result of items after user-defined operation.\n \"\"\"\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp\n\n for head_node in self.hashTable:\n for node in head_node.singlyLinkedList:\n node.values = list_func(node.values)", "def map(iteratee, *seqs):\n return _map(fnc.iteratee(iteratee), *seqs)", "def map_named(function: Callable[[str, Any], Any],\n val: Any,\n key: Optional[str] = \"\") -> Any:\n if isinstance(val, Mapping):\n return type(val)(\n **{k: map_named(function, v, key + \"/\" + k) for k, v in val.items()})\n elif isinstance(val, tuple) or isinstance(val, list):\n return type(val)(\n *\n [map_named(function, v, key + \"/\" + 
str(i)) for i, v in enumerate(val)])\n # check if it's a flax dataclass\n elif hasattr(val, \"__dataclass_fields__\"):\n classname = repr(val).split(\"(\")[0]\n return type(val)(**{\n k: map_named(function, v, f\"{key}/{classname}.{k}\")\n for k, v in val.__dataclass_fields__.items()\n })\n else:\n return function(key, val)", "def __or__(self, func):\n return self.bind(func)", "def funcToMethod(func,clas,method_name=None):\n func.im_class=clas\n func.im_func=func\n func.im_self=None\n if not method_name: method_name=func.__name__\n clas.__dict__[method_name]=func" ]
[ "0.6220919", "0.5839332", "0.5682536", "0.56519437", "0.55576444", "0.5554828", "0.54123265", "0.53920573", "0.5390306", "0.53785086", "0.5342291", "0.5333462", "0.53316593", "0.532099", "0.526166", "0.52036095", "0.52029043", "0.51939046", "0.5186709", "0.51471406", "0.50727075", "0.5064578", "0.50631595", "0.5056027", "0.5044718", "0.5043015", "0.5027963", "0.4981475", "0.4974079", "0.49624532", "0.49537206", "0.49535877", "0.49354646", "0.4909717", "0.48819843", "0.48701683", "0.48563936", "0.48431063", "0.48232126", "0.4806734", "0.47721246", "0.47526303", "0.47518167", "0.47221586", "0.46917596", "0.4687392", "0.46688488", "0.46683738", "0.46645454", "0.46623674", "0.46579534", "0.46312216", "0.46293557", "0.46290505", "0.46252206", "0.462258", "0.46210873", "0.46171963", "0.4616526", "0.46097046", "0.4586866", "0.45732653", "0.45676434", "0.45656613", "0.4545414", "0.4539162", "0.45329878", "0.45196283", "0.45194882", "0.45149085", "0.45022944", "0.4500392", "0.44991127", "0.44950667", "0.4490966", "0.4490495", "0.44861245", "0.44861242", "0.44714302", "0.44690573", "0.44526458", "0.44517222", "0.44465598", "0.44438678", "0.44420934", "0.44290194", "0.44259015", "0.4414054", "0.44090936", "0.44056666", "0.44036543", "0.4395128", "0.43566006", "0.43541196", "0.43520486", "0.4345658", "0.43403754", "0.43387824", "0.43335533", "0.4333365" ]
0.80145234
0
Decorator that memoizes a function so that its result is not recomputed when it is called again with the same arguments.
def memoize(maxsize=None, *args, **kwargs): return _coconut.functools.lru_cache(maxsize, *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def memoize(f):\n cache = {}\n @functools.wraps(f)\n def g(*args):\n ret = cache.get(args, cache)\n if ret is cache:\n ret = cache[args] = f(*args)\n return ret\n return g", "def memoize(func):\n cache = {}\n @wraps(func)\n def wrap(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return wrap", "def memoize(function):\r\n cache = {}\r\n def decorated_function(*args):\r\n if args in cache:\r\n return cache[args]\r\n else:\r\n val = function(*args)\r\n cache[args] = val\r\n return val\r\n return decorated_function", "def memoize(func):\r\n cache = {}\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n key = (args, frozenset(kwargs.items()))\r\n if key not in cache:\r\n cache[key] = func(*args, **kwargs)\r\n return cache[key]\r\n return wrapper", "def memoize(func):\n memo = None\n\n @wraps(func)\n def wrapper(self):\n if memo is not None:\n return memo\n\n return func(self)\n\n return wrapper", "def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)", "def memo(func):\n cache = {}\n\n def wrapper(*args, **kwargs):\n update_wrapper(wrapper, func)\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return wrapper", "def memoize(f):\n memo = {}\n\n def helper(*args, **kwargs):\n x = args, tuple(kwargs.items())\n if x not in memo:\n memo[x] = f(*args, **kwargs)\n return memo[x]\n\n return helper", "def memoize(func):\n\n @wraps(func)\n def wrapped():\n try:\n return func.result\n except AttributeError:\n pass\n\n func.result = func()\n return func.result\n\n return wrapped", "def memoize():\n unknown = object()\n\n def decorator(function):\n \"\"\"Wraps a function and returns its memoized version.\n\n Args:\n function: Function to wrap with memoization.\n Returns:\n The memoized version of the specified function.\n \"\"\"\n # Map: args tuple -> function(args)\n memoized = dict()\n\n def memoize_wrapper(*args, **kwargs):\n \"\"\"Memoization function wrapper.\"\"\"\n all_args = (args, tuple(sorted(kwargs.items())))\n value = memoized.get(all_args, unknown)\n if value is unknown:\n value = function(*args, **kwargs)\n memoized[all_args] = value\n return value\n\n return memoize_wrapper\n\n return decorator", "def memoization(func):\n cache = {}\n\n @wraps(func)\n def _wrap(*args, **kwargs):\n key = (args, tuple(sorted(kwargs.items())))\n result = cache.get(key, None)\n if result:\n print(\"It's cached\")\n return result\n\n result = func(*args, **kwargs)\n cache[key] = result\n return result\n\n return _wrap", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n except TypeError: # unhashable argument\n return f(*args)\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n except TypeError: # unhashable argument\n return f(*args)\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n except TypeError: # unhashable argument\n return f(*args)\n return _f", "def memoize_by_args(func):\n memory = {}\n\n @functools.wraps(func)\n def memoized(*args):\n if args not in memory.keys():\n value = func(*args)\n memory[args] = value\n\n return memory[args]\n\n return memoized", "def memoize(f):\r\n cache = {}\r\n\r\n def rval(*args, **kwargs):\r\n kwtup = tuple(kwargs.items())\r\n key = (args, 
kwtup)\r\n if key not in cache:\r\n val = f(*args, **kwargs)\r\n cache[key] = val\r\n else:\r\n val = cache[key]\r\n return val\r\n\r\n return rval", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n return f(args)\n return _f", "def memo(func):\n cache = {}\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = str(args) + str(kwargs)\n try:\n return cache[key]\n except KeyError:\n rc = func(*args, **kwargs)\n cache[key] = rc\n return rc\n return wrapper", "def _memoize(args_func=sorted):\n def _memoize(wrapped):\n wrapped.cache = dict()\n wrapped.cache['call_count'] = 0\n @wraps(wrapped)\n def func(*args):\n wrapped.cache['call_count'] += 1\n hashed_args = tuple(args_func(args))\n if hashed_args in wrapped.cache:\n return wrapped.cache.get(hashed_args)\n return wrapped.cache.setdefault(hashed_args, wrapped(*args))\n return func\n return _memoize", "def memoize(func):\n @wraps(func)\n def memoizer(self):\n if not hasattr(self, '_cache'):\n self._cache = {}\n if func.__name__ not in self._cache:\n self._cache[func.__name__] = func(self)\n return self._cache[func.__name__]\n return memoizer", "def memoized(f):\n cache = {}\n\n @wraps(f)\n def _memoized(*args, **kwargs):\n key = tuple(args) + tuple(kwargs.items())\n try:\n if key in cache:\n return cache[key]\n except TypeError: # if passed an unhashable type evaluate directly\n return f(*args, **kwargs)\n ret = f(*args, **kwargs)\n cache[key] = ret\n return ret\n return _memoized", "def memoize(func):\n cache = {}\n # Store results in a dict that maps arguments to results\n def wrapper(*args, **kwargs):\n if(args, kwargs) not in cache:\n # call func() and store the result.\n cache[(args,kwargs)] = func(*args,**kwargs)\n return cache[(args,kwargs)]\n return wrapper", "def _memoize(func, *args, **opts):\r\n if opts: # frozenset is used to ensure hashability\r\n key = args, frozenset(opts.items())\r\n else:\r\n key = args\r\n cache = func.cache # attribute added by memoize\r\n try:\r\n result = cache[key]\r\n except KeyError:\r\n result = cache[key] = func(*args, **opts)\r\n return result", "def memoize(decorated, memo):\n key = convert_to_hashable(decorated.args, decorated.kwargs)\n if key in memo:\n return memo[key]\n res = decorated(*decorated.args, **decorated.kwargs)\n memo[key] = res\n return res", "def memoize(func):\n table = dict() # function specific memoize table\n def wrappingfunction(*args):\n if args not in table: # args tuple hasn't been seen yet\n table[args] = func(*args) # envoke func call and store value\n return table[args] # return stored value\n return wrappingfunction # return wrappee", "def memoize(func):\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func", "def memoize(func):\n mem = {}\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in mem:\n mem[key] = func(*args, **kwargs)\n return mem[key]\n return memoizer", "def decorator(function):\n # Map: args tuple -> function(args)\n memoized = dict()\n\n def memoize_wrapper(*args, **kwargs):\n \"\"\"Memoization function wrapper.\"\"\"\n all_args = (args, tuple(sorted(kwargs.items())))\n value = memoized.get(all_args, unknown)\n if value is unknown:\n value = function(*args, **kwargs)\n memoized[all_args] = value\n return value\n\n return memoize_wrapper", "def memoization(function):\n\n def _decorator_(self):\n key = 
function.__name__\n value = memoization_get(self, key)\n if value is not None:\n return value\n else:\n value = function(self)\n memoization_set(self, key, value)\n return value\n\n _decorator_.__doc__ = function.__doc__\n return _decorator_", "def memo(f):\n # Peter Norvig's\n cache = {}\n\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.cache = cache\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.cache = cache\n return _f", "def memo(f):\n def _f(*args):\n try:\n return _f.cache[args]\n except KeyError:\n _f.cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.__name__ = f.__name__\n _f.cache = {}\n return _f", "def memoize(f):\n cache = {}\n\n def memf(*x):\n if x not in cache:\n cache[x] = f(*x)\n return cache[x]\n return memf", "def simple_memoize(wrapped):\n wrapped.cache = dict()\n @wraps(wrapped)\n def func(*args):\n if args in wrapped.cache:\n return wrapped.cache[args]\n return wrapped.cache.setdefault(args, wrapped(*args))\n return func", "def _memoize_return_values(func):\n cache= {}\n @wraps(func)\n def memf(*args, **kwargs):\n key = (args, frozenset(kwargs.items()))\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return memf", "def _memoizeArgsOnly (max_cache_size=1000):\n def wrapper (f):\n def fn (*args):\n try:\n return fn.cache[args]\n except KeyError:\n if fn.count >= max_cache_size:\n fn.cache = {}\n fn.count = 0\n fn.cache[args] = result = f(*args)\n fn.count += 1\n return result\n fn.cache = {}\n fn.count = 0\n return fn\n return wrapper", "def memoize(function):\n\tdef wrapper(*Args, **KWArgs):\n\n\t\tkey = _getMemoizeKey(function, *Args, **KWArgs)\n\n\t\tif key in Memory:\n\t\t\treturn Memory[key]\n\n\t\tRet = function(*Args, **KWArgs)\n\n\t\tMemory[key] = Ret\n\n\t\treturn Ret\n\n\twrap(function, wrapper)\n\n\treturn wrapper", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "def memoized(f):\n GlobalCache._caches[f] = {}\n GlobalCache._locks[f] = RLock()\n\n return decorator(GlobalCache.memoize, f)", "def memoize(func):\n tbl = {}\n\n def helper(args):\n if args not in tbl:\n tbl[args] = func(args)\n return tbl[args]\n return helper", "def memoize(cls, func, *args, **kw):\n with cls._locks[func], cls._lock:\n if not isinstance(args, collections.Hashable):\n result = func(*args, **kw)\n return result\n if kw:\n # frozenset is used to ensure hashability\n key = args, frozenset(kw.items())\n else:\n key = args\n # func.cache attribute added by memoize\n cache = cls._caches[func]\n try:\n if key in cache:\n result = cache[key].result\n cls.shrink_cache()\n return result\n except TypeError:\n result = func(*args, **kw)\n return result\n\n start = time.time()\n result = func(*args, **kw)\n end = 
time.time()\n duration = end - start\n\n cache[key] = CacheEntry(func, key, duration, result,\n kw.get('expiration'), *args, **kw)\n cls.shrink_cache()\n cls._cache.append(cache[key])\n return result", "def memoize(fn):\n cache = {}\n def newfn(*args, **kw):\n key = (tuple(args), tuple(sorted(kw.items())))\n if key in cache:\n return cache[key]\n else:\n cache[key] = val = fn(*args, **kw)\n return val\n newfn.__name__ = fn.__name__ + ' (MEMOIZED)'\n newfn.__module__ = fn.__module__\n return newfn", "def memoize( func=None, keyfn=tuple ):\n if func != None:\n memo_table = {}\n @functools.wraps( func )\n def memoized( *args ):\n \"\"\"\n Memoized Function\n \"\"\"\n value = keyfn( args )\n # check if parameter is in memo table\n # if so return cached value\n if value in memo_table:\n return memo_table[ value ]\n else:\n # if not in table store in table\n result = func( *args )\n memo_table[ value ] = result\n return result\n return memoized\n else:\n return functools.partial( memoize, keyfn = keyfn )", "def memoized( function ):\n\n mfWrap = _MemoizedFunction( function )\n\n # For efficiency purposes, let's make it as easy to look up\n # mfWrap.cache as possible.\n cache = mfWrap.cache\n\n def memoizedFunctionWrapper( *args ):\n # We're using a try-except clause here instead of testing\n # whether the dictionary has a key because we believe that it\n # is more efficient; it's preferable to speed up the most\n # common scenario where a cached value already exists by\n # simply assuming that it *does* exist.\n\n try:\n return cache[args]\n except KeyError:\n cache[args] = function( *args )\n return cache[args]\n \n finalWrapper = _generateArgWrapper( function, memoizedFunctionWrapper )\n\n return finalizeWrapper( function,\n finalWrapper,\n \"Memoized\" )", "def memoize(f):\n class MemoDict(dict):\n def __init__(self, func):\n self.func = func\n\n def __call__(self, *args):\n return self[args]\n\n def __missing__(self, key):\n result = self[key] = self.func(*key)\n return result\n\n return MemoDict(f)", "def memoize(f):\n cache = OrderedDict({})\n\n def wrapper(*keys, **kwargs):\n \"\"\" Search for invoker function's return value in cache for given arguments,\n if found then return else store function parameters as key\n and function return value as value in cache\n If cache size exceeds 2, delete the oldest used key value record\n \"\"\"\n key = str(keys) + str(kwargs)\n if key in cache:\n value = cache.pop(key)\n cache[key] = value\n return cache[key]\n while len(cache)>1:\n cache.popitem(False)\n cache[key] = f(*keys, **kwargs)\n return cache[key]\n return wrapper", "def memorized(f):\n cache = {}\n @wraps(f)\n def wrapped(*args):\n try:\n result = cache[args]\n except KeyError:\n result = cache[args] = f(*args)\n return result\n return wrapped", "def memoize(f):\n\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n self.__name__ = f.__name__\n\n def __call__(self, *args):\n global memoized_return_values\n try:\n return memoized_return_values[self.__name__]\n except KeyError:\n ret = memoized_return_values[self.__name__] = self.f(*args)\n return ret\n\n return memodict(f)", "def memoize(f):\n\n class memodict(dict):\n def __getitem__(self, *key):\n return dict.__getitem__(self, key)\n\n def __missing__(self, key):\n self[key] = ret = f(*key)\n return ret\n\n return memodict().__getitem__", "def memorize(func):\n cache = {}\n\n @wraps(func)\n def cached_function(*args, **kwargs):\n if args not in cache:\n cache[args] = func(*args, **kwargs)\n return cache[args]\n\n return 
cached_function", "def __call__(self, *args):\n if args not in self.memo:\n self.memo[args] = self.f(*args)\n return self.memo[args]", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n return memoizer", "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n\n return memoizer", "def memo(qty):\n def decorator(f):\n decoratee = Memo(qty,f)\n return functools.wraps(f)(decoratee)\n return decorator", "def memoize(default=None):\n def memoizer(func):\n val = []\n @functools.wraps(func)\n def inner():\n if not val:\n ret = func()\n val.append(ret if ret is not None else default)\n if logging.getLogger().isEnabledFor(logging.INFO):\n print '%s -> %r' % (func.__name__, val[0])\n return val[0]\n return inner\n return memoizer", "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache", "def memoize(prefix, time=60):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = memoize_key(prefix, *args, **kwargs)\n data = cache.get(key)\n if data is not None:\n return data\n data = func(*args, **kwargs)\n cache.set(key, data, time)\n return data\n return wrapper\n return decorator", "def memoize(f):\n\n class memodict(dict):\n @wraps(f)\n def __getitem__(self, *args):\n return super(memodict, self).__getitem__(*args)\n\n def __missing__(self, key):\n self[key] = ret = f(key)\n return ret\n\n return memodict().__getitem__", "def decorator(function):\n if name is None:\n try:\n # Python 3\n reference = function.__qualname__\n except AttributeError: # pragma: no cover\n # Python 2\n reference = function.__name__ # pragma: no cover\n\n reference = function.__module__ + reference\n else:\n reference = name\n\n reference = (reference,)\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n 
\"Wrapper for callable to cache arguments and return values.\"\n\n # match arguments to function signature\n func_signature = inspect.signature(function)\n proc_args = []\n proc_kwargs = {}\n for i_param, (name, param) in enumerate(func_signature.parameters.items()):\n if param.kind is not inspect.Parameter.POSITIONAL_OR_KEYWORD:\n raise NotImplementedError('Memoize decorator only supports positional-or-keyword arguments. '\n 'Submit issue to request support for optional arguments')\n\n if param.default is inspect._empty:\n if i_param < len(args):\n val = args[i_param]\n elif param.name in kwargs:\n val = kwargs[param.name]\n else:\n raise TypeError(\"{} missing required positional argument '{}'\".format(function.__name__, param.name))\n proc_args.append(val)\n else:\n if i_param < len(args):\n val = args[i_param]\n elif param.name in kwargs:\n val = kwargs[param.name]\n else:\n val = param.default\n proc_kwargs[param.name] = val\n\n # generate key from arguments\n key = reference + tuple(proc_args)\n\n if proc_kwargs:\n key += (diskcache.core.ENOVAL,)\n sorted_items = sorted(proc_kwargs.items())\n\n for item in sorted_items:\n key += item\n\n if typed:\n key += tuple(type(arg) for arg in proc_args)\n\n if proc_kwargs:\n key += tuple(type(value) for _, value in sorted_items)\n\n for filename_arg in filename_args:\n stats = []\n for filename in glob.glob(proc_args[filename_arg]):\n stats.append((os.path.getmtime(filename), self._hash_file_content(filename)))\n key += tuple(stats)\n\n for filename_kwarg in filename_kwargs:\n if filename_kwarg in proc_kwargs:\n stats = []\n for filename in glob.glob(proc_kwargs[filename_kwarg]):\n stats.append((os.path.getmtime(filename), self._hash_file_content(filename)))\n key += tuple(stats)\n\n result = self.get(key, default=diskcache.core.ENOVAL, retry=True)\n\n if result is diskcache.core.ENOVAL:\n result = function(*args, **kwargs)\n self.set(key, result, expire=expire, tag=tag, retry=True)\n\n return result\n\n return wrapper", "def memoizex(f):\n # http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/\n class MemoDict(dict):\n def __missing__(self, key):\n result = self[key] = f(key)\n return result\n return MemoDict().__getitem__", "def memoized(func, *, size_limit=10**8, eviction_policy='least-recently-used', cache_dir=CACHE_DIR,\n typed=False, round_digits=15, ignore_args=None):\n func_hash = hashlib.md5(func.__code__.co_code).hexdigest()\n func_id = \"{}.{:0>4s}\".format(func.__qualname__, func_hash[-4:])\n cache_dir = os.path.join(cache_dir, func_id)\n func.cache = diskcache.Cache(cache_dir, size_limit=size_limit, eviction_policy=eviction_policy)\n func.async_results = {}\n\n atexit.register(func.cache.close)\n\n @atexit.register\n def consolidate_async():\n for key, result in func.async_results.items():\n try:\n if result.successful():\n func.cache[dict(sorted(key))] = result.get()\n # Exception class changed in Python 3.7:\n # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.AsyncResult.successful\n except (AssertionError, ValueError):\n pass\n\n arg_names = inspect.getfullargspec(func).args\n if ignore_args is not None:\n ignore_args = frozenset([ignore_args] if isinstance(ignore_args, str) else ignore_args)\n assert all(arg in arg_names for arg in ignore_args), \"Unknown argument name passed to 'ignore_args' option.\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n key = kwargs.copy()\n key.update(zip(arg_names, args))\n if ignore_args is not None:\n key = {k: v for k, v 
in key.items() if k not in ignore_args}\n if not typed:\n key = {k: _normalize_type(v, round_digits) for k, v in key.items()}\n key = dict(sorted(key.items()))\n\n try:\n return func.cache[key]\n except KeyError:\n try:\n return func.async_results[tuple(key.items())]\n except KeyError:\n logging.debug(\"%s: cache miss on key %s\", wrapper.__qualname__, repr(key))\n value = func(*args, **kwargs)\n if isinstance(value, pool.AsyncResult):\n func.async_results[tuple(key.items())] = value\n else:\n func.cache[key] = value\n return value\n\n return wrapper", "def memoize(func=None, maxsize=2):\n\n def decorator(func):\n\n cache = OrderedDict()\n hits = 0\n misses = 0\n\n def cache_info():\n return {\n 'hits': hits,\n 'misses': misses,\n 'maxsize': maxsize,\n 'currsize': len(cache),\n }\n\n def set_cache_maxsize(x):\n nonlocal maxsize\n maxsize = x\n\n while len(cache) > maxsize:\n # Remove the left item (least recently used)\n cache.popitem(last=False)\n\n setattr(func, 'cache_info', cache_info)\n setattr(func, 'set_cache_maxsize', set_cache_maxsize)\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n nonlocal misses\n nonlocal hits\n\n all_args = list(args) + sorted(kwargs.items())\n key = _make_hashable(all_args)\n if key not in cache:\n # Make sure the current size is less than the max size\n while len(cache) >= maxsize:\n # Remove the left item (least recently used)\n cache.popitem(last=False)\n\n # This inserts the item on the right (most recently used)\n cache[key] = func(*args, **kwargs)\n misses += 1\n else:\n # Move the item to the right (most recently used)\n cache.move_to_end(key)\n hits += 1\n\n return cache[key]\n\n return wrapped\n\n if func is None:\n return decorator\n else:\n return decorator(func)", "def cache_result(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n assert len(args) == 0 and len(kwargs) == 0, \"Wrapped call must be empty\"\n if not hasattr(f, \"cached_result\"):\n f.cached_result = f()\n return f.cached_result\n return wrapper", "def memoize(obj):\r\n cache = obj.cache = {}\r\n\r\n @functools.wraps(obj)\r\n def memoizer(*args, **kwargs):\r\n key = str(args) + str(kwargs)\r\n if key not in cache:\r\n cache[key] = obj(*args, **kwargs)\r\n # only keep the most recent 100 entries\r\n if len(cache) > 100:\r\n cache.popitem(last=False)\r\n return cache[key]\r\n return memoizer", "def dp_memoize_instance(f):\n \n memoize = memoize_limited(max_size=None, max_mem_MB=25)\n return memoize(f)", "def memoize_with_args(f):\n\n class memodict():\n def __init__(self, f):\n self.f = f\n self.result = {}\n self.__name__ = f.__name__\n\n def __call__(self, *args):\n args_string = f.__name__\n for arg in args:\n if (isinstance(arg, ndarray)):\n args_string += hashlib.sha1(arg).hexdigest() + \",\"\n else:\n args_string += hashlib.sha1(str(arg)).hexdigest() + \",\"\n try:\n return self.result[args_string]\n except KeyError:\n self.result[args_string] = self.f(*args)\n return self.result[args_string]\n\n return memodict(f)", "def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper", "def permanent_function(*old_func):\n\n 
def decorator(func):\n \"\"\"\n decorates the given function and makes it a lazy one.\n\n :param function func: decorated function.\n\n :returns: function result.\n \"\"\"\n\n def wrapper():\n \"\"\"\n decorates the given function and makes it a lazy one.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result\n\n return update_wrapper(wrapper, func)\n\n if len(old_func) > 0:\n return decorator(old_func[0])\n\n return decorator", "def decorator(func):\n\n def wrapper():\n \"\"\"\n decorates the given function and makes it a lazy one.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result\n\n return update_wrapper(wrapper, func)", "def cache_result(func):\n\n @wraps(func)\n def with_cache(*args, **kwargs):\n \"\"\"\n Cached function\n \"\"\"\n key = '{}{}{}'.format(\n hash(func), hash(args), hash(frozenset(kwargs.items())))\n\n cached_result = cache.get(key)\n if cached_result is not None:\n return cached_result if cached_result != 'None' else None\n result = func(*args, **kwargs)\n cache.set(key, result if result is not None else 'None')\n\n return result\n\n return with_cache", "def test_memoization(self):\n non_memoized_func = lambda: random.randint(0, 1000000)\n yes_memoized_func = util.memoize(non_memoized_func)\n self.assertNotEqual(non_memoized_func(), non_memoized_func())\n self.assertEqual(yes_memoized_func(), yes_memoized_func())", "def one_use(func):\n attribute = \"_cache_\" + func.__name__\n\n @property\n @functools.wraps(func)\n def decorated(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, func(self))\n return getattr(self, attribute)\n return decorated", "def memoize(func=None, maxlen=None):\r\n if func is not None:\r\n cache = BoundedOrderedDict(maxlen=maxlen)\r\n @functools.wraps(func)\r\n def memo_target(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n lookup_value = pickle.dumps(candidate, 1)\r\n if lookup_value not in cache:\r\n cache[lookup_value] = func([candidate], args)[0]\r\n fitness.append(cache[lookup_value])\r\n return fitness\r\n return memo_target\r\n else:\r\n def memoize_factory(func):\r\n return memoize(func, maxlen=maxlen)\r\n return memoize_factory", "def at_most_once_per_cycle(fn): # pylint: disable=invalid-name\n # pylint: disable=missing-docstring,protected-access\n @functools.wraps(fn)\n def wrapper(self, *args, **kwargs):\n # no caching if the main loop is not running\n if not self._progress_last_run:\n return fn(self, *args, **kwargs)\n else:\n key = (fn, tuple(id(arg) for arg in args))\n try:\n update = (self._cache_last_updated[key] < self._progress_last_run)\n except AttributeError:\n self._cache_last_updated = defaultdict(float)\n self._cache_value = {}\n update = True\n if update:\n self._cache_value[key] = fn(self, *args)\n self._cache_last_updated[key] = time.time()\n # gc3libs.log.debug(\"%s(%s, ...): Using cached value '%s'\",\n # fn.__name__, obj, obj._cache_value[key])\n return self._cache_value[key]\n return wrapper", "def func_once(func):\n def decorated(*args, **kwargs):\n try:\n return decorated._once_result\n except AttributeError:\n decorated._once_result = func(*args, **kwargs)\n return decorated._once_result\n return 
decorated", "def decorator(func):\n\n def wrapper(*args, **kwargs):\n \"\"\"\n decorates the given method or function and makes it a lazy one.\n\n :param object args: function positional arguments.\n :param object kwargs: function keyword arguments.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('extended.permanent', func,\n args, kwargs, **options)\n if result is not None:\n return result\n\n result = func(*args, **kwargs)\n caching_services.try_set('extended.permanent', result, func,\n args, kwargs, **options)\n return result\n\n return update_wrapper(wrapper, func)", "def cached(func):\n cache_dct = {}\n\n @wraps(func)\n def _lru_cache_decorator(*args):\n key = args\n if key in cache_dct:\n return cache_dct[key]\n else:\n cache_dct[key] = func(*args)\n return cache_dct[key]\n return _lru_cache_decorator", "def cached(*arg: Callable) -> Any:\n fn = arg and arg[0]\n if not callable(fn):\n raise TypeError(\n '\"cached\" decorator must be used without arguments.') from None\n\n @wraps(fn)\n def wrapper(obj: object, *, force=False) -> Any:\n cache_name = '_' + fn.__name__\n if force:\n with suppress(AttributeError):\n delattr(obj, cache_name)\n try:\n return getattr(obj, cache_name)\n except AttributeError:\n val = fn(obj)\n setattr(obj, cache_name, val)\n return val\n\n return wrapper", "def cached(key):\n\n def decorator(fn):\n def decorated(cls):\n value = cls.get_cache(key)\n if value is not None:\n return value\n else:\n value = fn(cls)\n cls.set_cache(key, value)\n return value\n\n return decorated\n\n return decorator", "def constant_memo(f):\n f.memo_prec = -1\n f.memo_val = None\n def g(prec, **kwargs):\n memo_prec = f.memo_prec\n if prec <= memo_prec:\n return f.memo_val >> (memo_prec-prec)\n newprec = int(prec*1.05+10)\n f.memo_val = f(newprec, **kwargs)\n f.memo_prec = newprec\n return f.memo_val >> (newprec-prec)\n g.__name__ = f.__name__\n g.__doc__ = f.__doc__\n return g", "def cache(self, func=None, dependencies=(), ignore=None):\n if func is None:\n return partial(\n self.cache,\n dependencies=dependencies,\n ignore=ignore,\n )\n\n assert callable(func)\n\n # Update dependencies to enable chaining of dependencies.\n dependencies = (\n *dependencies,\n *reduce(\n add,\n map(\n tuple,\n map(\n default_attrgetter(\"_orig_func._dependencies\", default=()),\n dependencies,\n ),\n ),\n (),\n ),\n )\n\n def _inner_placeholder(hashed, args, kwargs):\n return func(*args, **kwargs)\n\n _inner_placeholder.__name__ = func.__name__\n\n cached_inner = self.memory.cache(ignore=[\"args\", \"kwargs\"])(_inner_placeholder)\n\n def bound_get_hashed(*orig_args, **orig_kwargs):\n return _get_hashed(\n func,\n *orig_args,\n dependencies=dependencies,\n hash_func=self.get_hash,\n ignore=ignore,\n **orig_kwargs,\n )\n\n @wraps(func)\n def cached_func(*orig_args, **orig_kwargs):\n hashed = bound_get_hashed(*orig_args, **orig_kwargs)\n return cached_inner(hashed, orig_args, orig_kwargs)\n\n def check_in_store(*args, **kwargs):\n \"\"\"Check whether a given call to the cached function is already cached.\n\n Args:\n args, kwargs: Arguments to check.\n\n Returns:\n IN_STORE: If the given call was found in the cache.\n\n Raises:\n NotCachedError: If the given call was not found in the cache.\n\n \"\"\"\n output_ids = cached_inner._get_output_identifiers(\n bound_get_hashed(*args, **kwargs), args, kwargs\n )\n if not cached_inner.store_backend.contains_item(output_ids):\n raise NotCachedError(f\"The given call is not cached: {output_ids}\")\n return IN_STORE\n\n 
cached_func.check_in_store = check_in_store\n cached_func._orig_func = func\n cached_func._orig_func._dependencies = dependencies\n\n return cached_func", "def cached(backend, **kwargs):\n def decorator(fn, key=None, key_generator=None, set_kwargs=None):\n if key is None:\n key = generate_function_key(fn)\n\n if key_generator is None:\n key_generator = generate_unique_key\n\n if set_kwargs is None:\n set_kwargs = {}\n\n @functools.wraps(fn)\n def inner(*args, **kwargs):\n unique_key = '%s:%s' % (key, key_generator(*args, **kwargs))\n\n # If the value is `None` from the cache, then generate the real\n # value and store it.\n value = backend.get(unique_key)\n if value is None:\n value = fn(*args, **kwargs)\n backend.set(unique_key, value, **set_kwargs)\n\n return value\n return inner\n\n return functools.partial(decorator, **kwargs)", "def memoized(cachetype=cachetools.LRUCache, cachesize=32,\n keyfcn=NumKeyLite, *cargs, **ckwargs):\n if cachetype is None:\n # Do nothing, return identity decorator.\n return lambda x: x\n\n def decorator(arraymethod):\n \"\"\"Method decorator to be returned by the enclosing factory function.\n \"\"\"\n # Create the wrapper around the underlying method call. This wrapper\n # does the memoization using the cache just\n # retrieve-if-absent-create'd.\n mname = arraymethod.__name__\n\n @functools.wraps(arraymethod)\n def wrapper(self, arrayarg, *args, **kwargs):\n \"\"\"The actual wrapper that intercepts the method call arguments,\n performs the caching, and returns the result to caller.\n \"\"\"\n # If no cache yet, create cache for this method.\n try:\n # _cachedict is keyed by the method names, rather than the\n # unwrapped method objects themselves, although the latter is\n # possible. We choose the former because this helps debugging\n # better. The original method, once wrapped, could be hard to\n # access by a Python name, although one can still enumerate\n # _cachedict's keys. 
Our choice works, because\n # functools.wraps() ensures conservation of names.\n cache = self._cachedict[mname]\n except KeyError:\n cache = cachetype(cachesize, *cargs, **ckwargs)\n self._cachedict[mname] = cache\n argkey = keyfcn(arrayarg)\n try:\n return cache[argkey]\n except KeyError:\n # Cache miss, compute and store the return value.\n pass\n retval = arraymethod(self, arrayarg, *args, **kwargs)\n try:\n cache[argkey] = retval\n except ValueError:\n # Value probably too large, ignore and pass through.\n pass\n return retval\n return wrapper\n return decorator", "def cache(self, func=None, ignore=None, verbose=None,\r\n mmap_mode=False):\r\n if func is None:\r\n # Partial application, to be able to specify extra keyword\r\n # arguments in decorators\r\n return functools.partial(self.cache, ignore=ignore,\r\n verbose=verbose, mmap_mode=mmap_mode)\r\n if self.cachedir is None:\r\n return NotMemorizedFunc(func)\r\n if verbose is None:\r\n verbose = self._verbose\r\n if mmap_mode is False:\r\n mmap_mode = self.mmap_mode\r\n if isinstance(func, MemorizedFunc):\r\n func = func.func\r\n return MemorizedFunc(func, cachedir=self.cachedir,\r\n mmap_mode=mmap_mode,\r\n ignore=ignore,\r\n compress=self.compress,\r\n verbose=verbose,\r\n timestamp=self.timestamp)", "def test_decorator_only_calls_function_once_if_value_cached(self, _, mock_get_grams_protein):\n # Set qualname since internal function uses it\n mock_get_grams_protein.__qualname__ = 'tests.Meat.get_grams_protein'\n decorated_mock_grams_protein = cached(timeout=5*60)(mock_get_grams_protein)\n decorated_mock_grams_protein(self.chicken)\n # Call the function twice with the same args\n decorated_mock_grams_protein(self.chicken)\n # calling the decorated mock function twice with the same args should only call the mock function once\n # as the return value should be stored inside the cache\n self.assertEqual(mock_get_grams_protein.call_count, 1)", "def cached(key):\n def wrapper(function):\n def wrapped(d,g,i):\n if key not in d:\n d[key] = function(d,g,i)\n return d[key]\n return wrapped\n return wrapper", "def cached(cache, key=keys.hashkey, lock=None):\n def decorator(func):\n if cache is None:\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n elif lock is None:\n def wrapper(*args, **kwargs):\n k = key(*args, **kwargs)\n try:\n return cache[k]\n except KeyError:\n pass # key not found\n v = func(*args, **kwargs)\n try:\n cache[k] = v\n except ValueError:\n pass # value too large\n return v\n else:\n def wrapper(*args, **kwargs):\n k = key(*args, **kwargs)\n try:\n with lock:\n return cache[k]\n except KeyError:\n pass # key not found\n v = func(*args, **kwargs)\n try:\n with lock:\n cache[k] = v\n except ValueError:\n pass # value too large\n return v\n return _update_wrapper(wrapper, func)\n return decorator", "def cached(func):\n return _lru_cache(None)(func)", "def cached(function):\n\t@wraps(function)\n\tdef check_cache_first(cls, *args):\n\t\tif not args in cls._cache:\n\t\t\tcode = function(cls, *args)\n\t\t\tif code:\n\t\t\t\tcls._cache[args] = code\n\t\t\t\treturn code\n\t\telse:\n\t\t\treturn cls._cache[args]\n\t\treturn None\n\treturn check_cache_first", "def _wrapper(self, *args, **kwargs):\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, 
**kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val", "def lru_cache(maxsize):\n\n def dec(fn, *args):\n cache = {}\n\n @wraps(fn)\n def wrapper(*args):\n key = args\n try:\n ret = cache[key]\n except KeyError:\n ret = cache[key] = fn(*args)\n return ret\n\n return wrapper\n\n return dec", "def lazyproperty(f: Callable[..., Any]):\n # pylint: disable=unused-variable\n return property(functools.lru_cache(maxsize=100)(f))", "def memoize(key, returns_entity=False, time=60):\n def decorator(fxn):\n def wrapper(*args, **kwargs):\n cache_key = make_key(key, *args, **kwargs)\n \n data = memcache.get(cache_key)\n if data is not None:\n logging.debug(\"Cache Hit: %s\" % cache_key)\n return deserialize_entities(data) if returns_entity else data\n \n logging.debug(\"Cache Miss: %s\" % cache_key)\n data = fxn(*args, **kwargs)\n serialized = serialize_entities(data) if returns_entity else data\n memcache.set(cache_key, serialized, time)\n \n return data\n return wrapper\n \n return decorator", "def memoized(method):\n method.cache = {}\n\n def invalidate(*args, **kwargs):\n key = _represent_args(*args, **kwargs)\n if not key:\n method.cache = {}\n elif key in method.cache:\n del method.cache[key]\n else:\n raise KeyError(f\"Not prevously cached: {method.__name__}({key})\")\n\n def new_method(*args, **kwargs):\n \"\"\"Cache the args and return values of the call\n\n The key cached is the repr() of args\n This allows more types of values to be used as keys to the cache\n Such as lists and tuples\n \"\"\"\n key = _represent_args(*args, **kwargs)\n if key not in method.cache:\n method.cache[key] = method(*args, **kwargs)\n return method.cache[key]\n\n new_method.invalidate = invalidate\n new_method.__doc__ = method.__doc__\n new_method.__name__ = f\"memoized({method.__name__})\"\n return new_method" ]
[ "0.8607585", "0.85202116", "0.8493943", "0.84850585", "0.8431629", "0.83458835", "0.830083", "0.82646495", "0.82383895", "0.82372373", "0.82353806", "0.8204611", "0.8204611", "0.8204611", "0.8190656", "0.814723", "0.81472206", "0.80591935", "0.80570954", "0.8055087", "0.8041502", "0.8034564", "0.80266255", "0.79938143", "0.7986371", "0.7968395", "0.79320824", "0.78871274", "0.78783935", "0.78541976", "0.7807379", "0.7790907", "0.7746813", "0.77429587", "0.77259487", "0.7723103", "0.77226526", "0.7718005", "0.7718005", "0.7710645", "0.7658581", "0.7594796", "0.7581825", "0.7581513", "0.75399756", "0.74615693", "0.7414452", "0.7373688", "0.73216283", "0.72999525", "0.72833204", "0.7257413", "0.7249684", "0.7227139", "0.7227139", "0.721007", "0.721007", "0.721007", "0.71791285", "0.7142717", "0.71355563", "0.6977657", "0.6968786", "0.6944147", "0.6939736", "0.6917164", "0.6791461", "0.6787826", "0.67849547", "0.6768335", "0.67622554", "0.67384064", "0.66682404", "0.66253966", "0.6591356", "0.65798235", "0.6559845", "0.65559393", "0.6505686", "0.65020156", "0.64245456", "0.64088", "0.63786334", "0.63625455", "0.63405865", "0.6337006", "0.6331813", "0.63219637", "0.629473", "0.6255453", "0.622989", "0.62137663", "0.6205938", "0.6204529", "0.6199196", "0.6190049", "0.6187001", "0.61746985", "0.6174478", "0.6158053" ]
0.68058383
66
Automatically starts the thread.
def __init__(self, file_name, max_queue_size=100):
    threading.Thread.__init__(self)
    self.daemon = True
    self.sqlite3_conn = sqlite3.connect(file_name, check_same_thread=False,
                                        detect_types=sqlite3.PARSE_DECLTYPES)
    self.sqlite3_cursor = self.sqlite3_conn.cursor()
    self.sql_queue = queue.Queue(maxsize=max_queue_size)
    self.results = {}
    self.max_queue_size = max_queue_size
    self.exit_set = False
    # Token that is put into queue when close() is called.
    self.exit_token = str(uuid.uuid4())
    self.start()
    self.thread_running = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n self._setup_thread()\n self.thread.start()", "def start(self):\n self.thread.start()", "def start(self):\n self._thread.start()", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()", "def start_thread(self):\n self.stop_thread()\n self.running = True\n self.run_thread = threading.Thread(target=self.run, daemon=True)\n self.run_thread.start()", "def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()", "def run(self):\n self.started()", "def start(self):\n if not self._Thread__initialized:\n raise RuntimeError('thread.__init__() not called')\n if self._Thread__started.is_set():\n raise RuntimeError('threads can only be started once')\n with threading._active_limbo_lock:\n threading._limbo[self] = self\n try:\n start_new_background_thread(self.__bootstrap, ())\n except Exception:\n with threading._active_limbo_lock:\n del threading._limbo[self]\n raise\n self._Thread__started.wait()", "def start(self):\n if self.__running:\n raise RuntimeError('already started')\n self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),))\n self.__running = True\n self.__hook_thread.start()", "def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )", "def start(self):\n self._task.start()", "def start(self):\n self._task.start()", "def start(self):\n\n self._task.start()", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()", "def activate(self):\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start()", "def __init__(self):\n Thread.__init__(self)\n self.start() # start the thread", "def start(self):\n with self._lock:\n if not self.started():\n self._started = None\n getattr(self.factory, 'start_' + self.class_name())(self)", "def start_background_thread(self):\n self.runner = Runner(queue=queue, app_id=self.app_id)\n self.runner.start()\n # TODO: stop the thread at some point?", "def start(self):\n self.synchronizer = SyncThread(self.api, self.sync_dir)\n self.synchronizer.start()\n self.tray.on_login()", "def starting(self) -> None:\n self._prepopulate_runnables()\n self._loop_handler = threading.Thread(target=self._loop)\n self._loop_handler.daemon = True\n self._loop_handler.start()", "def start(self):\r\n if self._ready:\r\n return\r\n\r\n self._start()\r\n self._ready = True", "def start(self):\n if self._start is not None:\n raise ValueError, \"task %s already started\" % self._name\n self._start = 1\n self.run()", "def start(self):\n return self._thread.start()", "def run():\r\n autostartup()", "def start(self):\r\n thread = threading.Thread(target=self.run)\r\n try:\r\n thread.start()\r\n except RuntimeError as e:\r\n raise 
SchedulerError(f\"Failed to start worker '{self.WORKER_ID}': \" + str(e))", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def start(self):\n self._do_work.set()\n self._worker_thread.start()", "def do_start(self, args) :\r\n if not self.wait2start:\r\n Thread(target=self.start_loop).start()\r\n self.wait2start = True\r\n else:\r\n self.__Logger.warn(\"Waiting for simulators to be ready. To force start, type \\\"forcestart\\\"\")", "def start(self):\n self.stop_recognising.clear()\n self.thread.start()", "def start():", "def start():", "def start():", "def start():", "def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)", "def on_start(self):\n self.run_in_background(self.__run_client)", "def _start_in_thread(self):\n return spawn_waitready(self._listen, self.start)[0]", "def start(self):\n self.parent.start(auto_terminate=False)\n self.started = True", "def start(self):\n self._watchdog_thread.start()", "def _ensure_thread(self) -> None:\n\n if not self._thread:\n thread = self._thread_factory(self.run)\n self._thread = thread\n thread.start()", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def start(self):\n self.reset()\n self.on_start()", "def start(self):\n self._is_waiting = False", "def start(self):\n gevent.spawn_later(self._period, self._run)", "def start(self):\n ...", "def _start(self):\n pass", "def _start_thread(self, fn, daemon=False):\n daemon = Thread(target=fn, daemon=daemon)\n daemon.start()", "def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()", "def start(self):\r\n pass", "def start (self):\n pass", "def start (self):\n pass", "def start(self):\n self.login(not self.quiet)\n self.start_time = time.time()\n while True:\n self.print_time()\n try:\n self.tick()\n except Exception as e:\n print(e)", "def start(self):\n gv.logger.info(\"Started playing new playlist\")\n thread = Thread(target=self.run, args=())\n thread.daemon = True\n self.thread = thread\n thread.start()", "def do_start(self, *arg):\n self._keep_looping = True\n\n print_info(\"Starting sensors\")\n\n self._loop()", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "async def start(self):", "async def start(self):", "def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()", "def start(self):\n self.start_time = time.time()", "def start_non_blocking(self):\n self._start_thread(self.start, daemon=True)", "def start(self):\n if self._started:\n return\n\n self._register()\n self._started = True", "def schedule_start(self):\n self.initialize_scheduler()", "def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()", "def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()", "def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n 
time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()", "def start(self):\n self.open()\n #t = Thread(target=self._cache_update, args=())\n #t.daemon = True\n #t.start()", "def start_thread(self):\n self.thread = Thread(target=self.put_lines_into_queue)\n self.thread.daemon = True\n self.thread.start()", "def connect(self):\n self.start()", "def start(self) -> None:\n logger.log(self.log_level, f'Start {self.name}...')\n self.started = True\n super().start()", "def start(self):\n \n if not self.is_running:\n self._timer = threading.Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True", "def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def start(self):\n if self.threadPool:\n self.threadPool.addTask(self)\n\n # Lets other threads have a chance to run\n time.sleep(0)\n else:\n raise TaskError(\"start(): nothing to start for task %s\" % self)", "def _make_thread(self):\r\n pass", "def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()", "def start(self):\r\n self.start_time = time.time()", "def background(self):\n self.thread = threading.Thread(target=self.run)\n self.thread.setDaemon(True)\n self.thread.start()", "def run(self):\n self.monitor.start()", "def start(self):\n if self._sync_thread is not None:\n raise exceptions.JournalAlreadyStarted()\n\n LOG.debug('Starting the journal sync thread')\n WAKE_UP_EVENTS[self.uuid] = threading.Event()\n self._stop_event.clear()\n self._sync_thread = threading.Thread(name='sync', target=self._run)\n self._sync_thread.start()", "def start(self) -> None:", "def start(self) -> None:", "def on_pre_enter(self):\n self.setup()\n self.start()", "def start(cls):\n\n cls._set_mode_running()\n TimeDisplay.start_time()\n for callback in cls.start_callback:\n callback()", "def start_thread(self) -> threading.Thread:\n assert self._thread is None, \"Thread has already been created.\"\n\n self._thread = threading.Thread(target=self.start)\n self._thread.start()\n return self._thread", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "async def start(self) -> None:", "def start(self):\n self.stream.start()\n self.running = True\n self.update()" ]
[ "0.8236653", "0.81681097", "0.8104186", "0.8093559", "0.774491", "0.7613398", "0.7493972", "0.7435424", "0.734553", "0.73255163", "0.7319545", "0.7309675", "0.72819525", "0.72819525", "0.72664046", "0.7243573", "0.7182632", "0.7128363", "0.71228725", "0.71228725", "0.7110407", "0.70880777", "0.70876294", "0.70695406", "0.7063615", "0.7054265", "0.70513374", "0.703592", "0.7023353", "0.7004549", "0.6996476", "0.6974137", "0.6969031", "0.6959448", "0.695924", "0.695924", "0.695924", "0.695924", "0.694236", "0.6933848", "0.69080347", "0.69047654", "0.6882024", "0.687667", "0.6868757", "0.6868757", "0.6863581", "0.6861133", "0.68607885", "0.685071", "0.6847898", "0.6844535", "0.6831823", "0.6826879", "0.6808842", "0.6808842", "0.6808823", "0.68074435", "0.67986596", "0.67835534", "0.67835534", "0.67835534", "0.67835534", "0.67835534", "0.67835534", "0.67835534", "0.67835534", "0.6764446", "0.6764446", "0.67643696", "0.67579424", "0.67487824", "0.67449176", "0.67318344", "0.67232865", "0.67232865", "0.6716919", "0.6713547", "0.67116684", "0.6700873", "0.6698191", "0.66886026", "0.6687896", "0.6679831", "0.66725785", "0.66697717", "0.6663315", "0.6662033", "0.66582435", "0.6643347", "0.6630528", "0.6622963", "0.6622963", "0.6612157", "0.65993196", "0.6596605", "0.6587981", "0.6587981", "0.6587981", "0.6585494", "0.6584174" ]
0.0
-1
Thread loop. This is an infinite loop. The iter method calls self.sql_queue.get() which blocks if there are no values in the queue. As soon as values are placed into the queue the process will continue. If many executes happen at once, it will churn through them all before calling commit() to speed things up by reducing the number of times commit is called.
def run(self):
    logging.debug("run: Thread started")
    execute_count = 0
    for token, query, values in iter(self.sql_queue.get, None):
        logging.debug("sql_queue: %s", self.sql_queue.qsize())
        if token != self.exit_token:
            logging.debug("run: %s", query)
            self.run_query(token, query, values)
            execute_count += 1
            # Let the executes build up a little before committing to disk
            # to speed things up.
            if self.sql_queue.empty() \
                    or execute_count == self.max_queue_size:
                logging.debug("run: commit")
                self.sqlite3_conn.commit()
                execute_count = 0
            pass # exit if
        # Only exit if the queue is empty. Otherwise keep getting
        # through the queue until it's empty.
        if self.exit_set and self.sql_queue.empty():
            self.sqlite3_conn.commit()
            self.sqlite3_conn.close()
            self.thread_running = False
            return
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\r\n num_workers = g.num_query_queue_workers\r\n wq = WorkQueue(num_workers = num_workers)\r\n wq.start()\r\n\r\n while True:\r\n job = None\r\n #limit the total number of jobs in the WorkQueue. we don't\r\n #need to load the entire db queue right away (the db queue can\r\n #get quite large).\r\n if len(running) < 2 * num_workers:\r\n with running_lock:\r\n iden, pickled_cr = get_query()\r\n if pickled_cr is not None:\r\n if not iden in running:\r\n running.add(iden)\r\n job = make_query_job(iden, pickled_cr)\r\n wq.add(job)\r\n\r\n #if we didn't find a job, sleep before trying again\r\n if not job:\r\n time.sleep(1)", "def worker(self, queue):\n with sa.create_engine(dsn).connect() as dbcon:\n while True:\n if queue.qsize() == 0:\n sleep(1)\n if queue.qsize() == 0:\n break\n continue\n item = queue.get()\n try:\n if hash(item['title']) in self.exist_products:\n dbcon.execute(Product.update().values(**item).where(Product.c.id == self.get_id(item)))\n else:\n result = dbcon.execute(Product.insert().values(**item))\n self.exist_products[hash(item['title'])] = result.inserted_primary_key[0]\n except Exception as e:\n print(type(e), e)", "def _run(self) -> None:\n while True:\n args: MigrationArgs = self._queue.get(block=True)\n with self._lock:\n if args.collection in self._chunks:\n if args.shard_key not in self._chunks[args.collection]:\n self._split_chunk(args.collection, args.shard_key)\n self._move_chunk(args)", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def run(self):\n # First thing, create a MySQL connection for this thread\n self.connect()\n\n # Start thread's ioloop\n while self.conn.running:\n result = None\n error = None\n cursor = None\n try:\n # Get next task from queue\n task = self.conn.queue.get(True)\n # Handle special abort command\n if task['command'] == 'abort':\n self.conn.queue.put(task)\n break\n\n # Ignore Transactions which are not this thread's\n tx_id = task.get('tx_id')\n if tx_id is not None:\n if tx_id != self.name:\n # Put task request back into queue and wait again\n self.conn.queue.put(task)\n continue\n\n # Handle transactions\n if task['command'] == '*begin-tx*':\n if self.in_tx:\n # Already attending a transaction, return request to queue\n self.conn.queue.put(task)\n continue\n else:\n # Signal this Thread will handle the Transaction!\n self.in_tx = True\n result = self.name\n elif task['command'] == '*end-tx*':\n if self.in_tx and task['tx_id'] == self.name:\n # This is our signal to stop attending this transaction\n self.in_tx = False\n else:\n # Not attending a transaction or it's not our transaction. 
Either way, ignore request\n self.conn.queue.put(task)\n continue\n else:\n # Get a DB cursor and execute query (at most 3 times!)\n retries = 3\n while retries > 0:\n try:\n cursor = self.db.cursor()\n rows_affected = cursor.execute(task['query'], task.get('args'))\n error = None\n break\n except (AttributeError, MySQLdb.OperationalError) as e:\n retries -= 1\n error = e\n cursor = None\n self.connect()\n except Exception as e:\n if cursor is not None:\n cursor.close()\n error = e\n break\n\n if error is None:\n # Determine result\n if task['command'] == 'select':\n # for a SELECT, we want the resultset\n result = list(cursor.fetchall())\n if len(result) == 0:\n result = None\n elif task['command'] == 'insert':\n # for an INSERT, we want the new ID\n result = cursor.lastrowid\n else:\n # for everything else, we'll be fine with rows_affected\n result = rows_affected\n else:\n if retries == 0:\n raise Exception('Failed 3 reconnection attempts to MySQL server: {0}'.format(e))\n except Exception as e:\n error = e\n finally:\n # Make sure we close the DB cursor!\n if cursor is not None:\n cursor.close()\n\n # Send result to the query's request-ee\n self.conn._send_result(task, result, error)\n\n # No more tasks. Close connection\n self.disconnect()", "def run(self):\n _LOGGER.info(\"Started.\")\n\n ExecutorThread.local_thread.executor_object = self\n self.__persister = _persistence.MySQLPersister()\n _persistence.PersistentMeta.init_thread(self.__persister)\n\n procedure = None\n while True:\n if procedure is None or procedure.is_complete():\n procedure = self._next_procedure(procedure)\n _LOGGER.debug(\"Executing procedure (%s).\", procedure)\n if procedure is None:\n break\n\n self.__job = self.__queue.get()\n _LOGGER.debug(\"Reading next job from queue, found %s.\",\n self.__job)\n self.__job.execute(self.__persister, self.__scheduler, self.__queue)\n self.__queue.done()\n\n _persistence.PersistentMeta.deinit_thread()", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def run(self):\n # We defer creating the Couchbase object until we are actually 'in' the\n # separate process here.\n self._connect()\n\n while True:\n next_size = None\n (i, doc, size) = self.in_queue.get()\n # We use a \"magic\" null generator to terminate the workers\n if not doc:\n # Pass the death on...\n self.out_queue.put((i, doc, size))\n break\n # Actually perform the set.\n try:\n next_size = doc.next()\n value = self.buffer[:next_size]\n self._set_with_retry('doc_' + str(i), value)\n size = next_size\n except StopIteration:\n pass\n self.out_queue.put((i, doc, size))", "def processq(self):\n\n while True:\n command = None\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if any(q):\n command = q.pop(0)\n # remember q has now changed\n if not any(q):\n self.queuefile.unlink()\n else:\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()\n\n if command:\n self.execute(command)\n else:\n break", "def _watchdog(self):\n while True:\n try:\n # Arno, 2012-07-12: apswtrace detects 7 s commits with yield 5 min, so reduce\n yield 60.0\n\n # flush changes to disk every 1 minutes\n self._database.commit()\n\n except Exception:\n # OperationalError: database is locked\n dprint(exception=True, level=\"error\")\n\n except GeneratorExit:\n if __debug__: 
dprint(\"shutdown\")\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n self._database.commit(exiting = True)\n break", "def threadWorker(self):\n while True:\n row = self.queue.get() #get a row of data\n if row is None: #ending criterium\n break\n self.similarityQuestions(row) #the actual working function\n self.queue.task_done() #inform the queue one task is done", "def _db_execute(self, cur, sql_query):\n self.ctx.dbq_count += 1\n \n try:\n a = time.time()\n query, params = self._process_query(sql_query)\n out = cur.execute(query, params)\n b = time.time()\n except:\n if self.printing:\n print >> debug, 'ERR:', str(sql_query)\n if self.ctx.transactions:\n self.ctx.transactions[-1].rollback()\n else:\n self.ctx.rollback()\n raise\n\n if self.printing:\n print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))\n return out", "def load(self):\n while True:\n sqldata = self._runsql()\n if not sqldata: # No rows to process. Return for now\n return True\n\n status = self._load_elastic(sqldata)\n if status[1]:\n self.logger.error(\"Errors occurred : %s\" % status[1])\n # TODO: Should we quit(return False) here \n # since there are errors ?\n\n # This should be the remainder and nothing left after that\n # since we didn't exceed max rows\n if len(sqldata) < self.max_rows:\n self.logger.info(\"Finished inserting up to %d\" % self.seq)\n return True", "def _fetch_loop(self, conn: LoggingDatabaseConnection) -> None:\n i = 0\n while True:\n with self._event_fetch_lock:\n event_list = self._event_fetch_list\n self._event_fetch_list = []\n\n if not event_list:\n # There are no requests waiting. If we haven't yet reached the\n # maximum iteration limit, wait for some more requests to turn up.\n # Otherwise, bail out.\n single_threaded = self.database_engine.single_threaded\n if (\n not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING\n or single_threaded\n or i > EVENT_QUEUE_ITERATIONS\n ):\n return\n\n self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)\n i += 1\n continue\n i = 0\n\n self._fetch_event_list(conn, event_list)", "def _execute_deferred_queries(self):\n\n assert not self.__is_connected\n\n if not self._deferred_queries:\n return\n\n with Transaction(self.__database_name) as txn:\n while True:\n try:\n query = self._deferred_queries.popleft()\n txn.session.execute(query).close()\n except IndexError:\n break", "def _run(self) -> None:\n try:\n while True:\n loop_time = self._get_time()\n loop_time_flush_interval = self._get_time(self.flush_interval.total_seconds())\n\n if loop_time >= self.flushing_interval_deadline:\n self._flush_batch()\n self.flushing_interval_deadline = loop_time + loop_time_flush_interval\n self.logger.debug('Flush interval deadline. 
Flushed batch.')\n\n try:\n interval = self.flushing_interval_deadline - loop_time\n item = self.event_queue.get(True, interval)\n\n if item is None:\n continue\n\n except queue.Empty:\n continue\n\n if item == self._SHUTDOWN_SIGNAL:\n self.logger.debug('Received shutdown signal.')\n break\n\n if item == self._FLUSH_SIGNAL:\n self.logger.debug('Received flush signal.')\n self._flush_batch()\n continue\n\n if isinstance(item, UserEvent):\n self._add_to_batch(item)\n\n except Exception as exception:\n self.logger.error(f'Uncaught exception processing buffer. Error: {exception}')\n\n finally:\n self.logger.info('Exiting processing loop. Attempting to flush pending events.')\n self._flush_batch()", "def execute(self):\n for db_name in list(self.__dbs):\n try:\n self.__dbs[db_name].execute()\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n traceback_log = ''.join(line for line in lines)\n sys.stderr.write('For DB [ {} ]\\n\\tError:\\n {}'.format(db_name, traceback_log))\n self.__dbs = {}\n self.last_execution_time = time()", "def _doing(self, data):\n curr = self.conn.cursor()\n curr.executemany(self.sql, data)\n self.conn.commit()\n curr.close()", "def run(self):\n while True :\n try :\n instance_id = self.queue.get()\n db.hset(application_name,instance_id,1)\n except:\n pass\n finally:\n pass", "def syncDBLoop(self):\n while 1:\n self.evSyncDB.wait()\n if self.bQuit == True:\n return\n \n self.evSyncDB.clear()\n self.db.syncDB()\n \n self.evAPI.set()", "def insert_many_execute(self) -> None:\n self.connection.isolation_level = None\n self.cursor.execute('BEGIN TRANSACTION')\n for i in self.__sql_buffer.split(';'):\n self.cursor.execute(i)\n self.__sql_buffer = \"\"\n self.cursor.execute('COMMIT')", "def _flush(self):\n tempbuf = self.databuffer\n self.databuffer = []\n self.database.runInteraction(self._executemany, tempbuf)", "def execute(cursor, query):\n while True:\n try:\n cursor.execute(query)\n break\n except Exception as e:\n print(\"Database query: {} {}\".format(cursor, query))\n print(\"Database retry reason: {}\".format(e))\n return cursor", "def execute(self):\n for move in self._queue:\n move.execute()", "def connection_work(\n conn: DBConn,\n conn_id: int,\n obj_list: List[DBObject],\n in_queue: multiprocessing.Queue, # type: ignore\n out_queue: multiprocessing.Queue, # type: ignore\n time_limit_sec: Optional[int],\n done_ctr: multiprocessing.Value,\n ) -> None:\n\n time.sleep(3)\n begin_ts = time.time()\n logger.info(\"[{}]: started thread\".format(conn_id))\n\n empty_cnt: int = 0\n\n while time_limit_sec is None or time.time() < begin_ts < time_limit_sec:\n try:\n txn = in_queue.get(timeout=2)\n except queue.Empty:\n empty_cnt += 1\n if empty_cnt > 3:\n break\n else:\n continue\n\n logger.info(\"[{}]: poped transaction {} (size = {})\".format(conn_id, txn.id, in_queue.qsize()))\n for hist_elem in process_txn(obj_list, conn, conn_id, txn):\n out_queue.put(hist_elem)\n logger.info(\"[{}]: finished transaction {}\".format(conn_id, txn.id))\n\n logger.info(\"[{}]: closing queue (size = {})\".format(conn_id, out_queue.qsize()))\n out_queue.close()\n with done_ctr.get_lock():\n done_ctr.value += 1\n logger.info(\"[{}]: finished thread (done ctr at {})\".format(conn_id, done_ctr.value))\n time.sleep(3)", "def run(self):\n assert self.queue is not None, \"Must specify queue or override run()\"\n\n while not self.terminated():\n qs = 
self.queue.objects.filter(status=self.queue.UNSUBMITTED,).order_by(\n \"-seq\"\n )[: django.conf.settings.DAEMONS_MAX_BATCH_SIZE]\n if not qs:\n self.sleep(django.conf.settings.DAEMONS_IDLE_SLEEP)\n continue\n\n for task_model in qs:\n try:\n self.do_task(task_model)\n task_model.status = self.queue.SUCCESS\n except AsyncProcessingIgnored:\n task_model.status = self.queue.IGNORED\n except Exception as e:\n if isinstance(e, AsyncProcessingRemoteError):\n # This is a bit messy. Do not log a trace when the\n # error is due to the remote service rejecting the request.\n # Such an error is still permanent for the task though.\n self.log.error(e)\n else:\n self.log.error('#' * 100)\n self.log.exception(f'Exception when handling task \"{task_model}\"')\n\n task_model.error = str(e)\n # if self.is_permanent_error(e):\n task_model.status = self.queue.FAILURE\n task_model.errorIsPermanent = True\n # raise\n else:\n task_model.submitTime = self.now_int()\n\n task_model.save()\n\n self.sleep(django.conf.settings.DAEMONS_BATCH_SLEEP)\n self.log.info(\"Exiting run loop.\")", "def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')", "def run_async (self):\n if self.testing:\n return\n conn = Connection(self.db, self.host, self.user, self.passwd, True)\n conn.execute(self.sql)\n self.table = conn.fetch()", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def _EventQueueWorker(self):\n while not self.stop:\n gevent.sleep(0)\n\n try:\n with Timer('task_process_time', False) as t:\n # Pull the next task off the queue.\n task, enqueue_times = model_provider.GetQueues().GetTask()\n\n # Retrieve the Tab Types for the NIDs so we know what handler to use.\n nid_to_type = model_provider.GetNames().GetTypes(task.keys())\n\n # Add the queued state parts to the main states.\n greenlets = [\n gevent.spawn(\n model_provider.GetStates().ReduceInto,\n nid, cid_to_sstate, nid_to_type[nid])\n for nid, cid_to_sstate in task.iteritems()]\n\n gevent.joinall(greenlets)\n\n LOG.info('Processed tasks %3dN %5dC %7.3fs' % (\n len(task), len(enqueue_times), t.Elapsed))\n\n model_provider.GetLatency().RecordForProcessed(enqueue_times)\n\n except Exception:\n LOG.error('Exception in Queue Worker loop')\n LOG.error(traceback.format_exc())\n gevent.sleep(1)", "def iterate(self):\n self._prepare_workers()\n self.prepare()\n\n self.job_queue = self.get_job_queue()\n self.job_done = self.get_job_done()\n\n self.worker_informations[\"started\"] = True\n self.write_worker_informations()\n # Ici : enregistrer son worker\n\n GARGAGE_COUNT = 0\n while True:\n\n #############################################\n ### Try to retrieve a job_id in the queue ###\n #############################################\n _start_time_queue = time.time()\n must_stop = False\n\n do_print = True\n while True:\n\n try:\n job_id = self.job_queue.remove()\n except Exception as e:\n logger.fatal(e, exc_info=True)\n raise e\n\n # Ici : on peut peut etre verifier si on a pas deja fait le job\n # ce qui peut arriver, si on a mal synchroniser en entree ? 
=> Ex : on a relancer le controller avec ces models par default ?\n # ou si on a retirer 2 fois un model random,\n\n if job_id is not None:\n # I have found something in the queue\n break\n\n must_stop, reason = self.must_stop()\n if must_stop:\n break\n\n current_time = time.time()\n if (\n self.max_queue_waiting_time is not None\n and current_time - _start_time_queue >= self.max_queue_waiting_time\n ):\n logger.info(\"queue was empty...\")\n logger.info(\"stop waiting for queue\")\n break\n else:\n if do_print:\n logger.info(\"queue was empty...\")\n logger.info(\"wait for queue for %d sec(s)\" % self.input_queue_sleeping_time)\n do_print = False # to print only time\n time.sleep(self.input_queue_sleeping_time)\n\n ###########################################\n # max_queue_waiting_time : #\n # * None <=> inf => wait forever #\n # * -1 => don't wait at all #\n # * x => wait x seconds #\n ###########################################\n\n if job_id is None:\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = \"empty queue\"\n self.write_worker_informations()\n\n break\n\n if must_stop:\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = reason\n self.write_worker_informations()\n logger.info(\"I must stop because %s\" % reason)\n break\n\n ###########################################\n ### Retrieve the parameters of that job ###\n ###########################################\n job_param = self.data_persister.read(key=job_id, path=\"job_param\", write_type=SavingType.json)\n\n logger.info(\"start job_id : %s\" % job_id)\n logger.info(\"\")\n\n try:\n _success = False\n start_time = time.time()\n\n #################################\n ### Send job_id and job_param ###\n #################################\n yield job_id, job_param\n\n return_time = time.time()\n _success = True\n \n except Exception as e:\n logger.fatal(e, exc_info=True)\n raise e\n\n finally:\n\n if not _success:\n ####################################\n ### It means there were an error ###\n ####################################\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = \"error\"\n self.write_worker_informations()\n\n ########################\n ### Save time of job ###\n ########################\n self._all_times.append(return_time - start_time)\n\n ##################################\n ### Do a garbage collector run ###\n ##################################\n GARGAGE_COUNT += 1\n if GARGAGE_COUNT >= self.gc_collect_freq:\n GARGAGE_COUNT = 0\n gc.collect()\n\n ###############################\n ### Add job to 'done queue' ###\n ###############################\n could_add = False\n _start_time_done_queue = time.time()\n\n do_print = True\n while True:\n could_add = self.job_done.add(data=job_id)\n\n if could_add:\n break\n\n must_stop, reason = self.must_stop()\n if must_stop:\n break\n\n current_time = time.time()\n if (\n self.max_done_queue_waiting_time is not None\n and current_time - _start_time_done_queue >= self.max_done_queue_waiting_time\n ):\n logger.info(\"done queue was full...\")\n logger.info(\"stop waiting for done queue\")\n break\n else:\n if do_print:\n logger.info(\"done queue was full...\")\n logger.info(\"wait for done queue for %d sec(s)\" % self.done_queue_sleeping_time)\n\n do_print = False # to print only once\n time.sleep(self.done_queue_sleeping_time)\n\n #############################################\n # max_done_queue_waiting_time : #\n # * None <=> inf : wait for 
ever #\n # * -1 : don't wait at all #\n # * x : wait for x seconds #\n #############################################\n\n # Ici : regarder si on a un flag 'stop'\n\n if not must_stop:\n must_stop, reason = self.must_stop()\n\n if must_stop:\n self.worker_informations[\"stopped\"] = True\n self.worker_informations[\"stopping_reason\"] = reason\n self.write_worker_informations()\n logger.info(\"I must stop because %s\" % reason)\n break", "def run(self):\n rows = None\n if self.sql.startswith('select'):\n conn = self.table.connect()\n with conn.cursor() as curs:\n try:\n curs.execute(self.sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {self.sql}:\n {error.code}\"\"\")\n self.excep = exc\n raise exc\n else:\n rows = curs.fetchall()\n # logging.critical(f\"\"\"executed {self.sql}\"\"\")\n self.result_exec = rows", "def __call__(self, *args, **kwargs):\n with ThreadPoolExecutor(max_workers=2) as executor:\n executor.submit(self.perform_callback, *args, **kwargs)\n self.database_query = executor.submit(mark_task_as_done(self.sql_command), *self.database_args, **self.database_kwargs)", "def _runsql(self):\n self.logger.info(\"Running SQL where sequence > %s\" % self.seq)\n try:\n results = self.engine.execute(self.sql,\n (self.max_rows, self.seq)).fetchall()\n except sqlalchemy.exc.ProgrammingError, err:\n self.logger.critical(\"Error connecting to DB : %s\" % err)\n return None\n self.logger.info('Fetched %d rows from DB' % len(results))\n if not len(results):\n self.logger.info(\"No rows returned from DB. Finished loading\")\n return False\n return results", "def run(self):\n\n import time\n LOGGER.info(\"Caching thread started !\")\n\n while True:\n\n # Get all data\n # Make data visible from parent thread\n self.data = self._forge_data(self._sqla_session)\n\n # Wait 30 seconds before new processing\n time.sleep(cm.DELAY)", "def flush(self):\n while self._processing_thread.is_alive():\n with self._pending_rows_mutex:\n if len(self._pending_rows) == 0:\n break\n time.sleep(0.1)", "def execute(self,data):\n\n try:\n\n start = time.time()\n\n self.cursor.executemany(self.operation, data)\n\n end = time.time()\n\n logger.info(\"Operation [{}] took {:.3f} seconds; {} operations processed\".format(self.operation, end-start, len(data)))\n\n except Exception, exc:\n\n # Not so typical: handle integrity constraints (generate warnings)\n if exc.__class__.__name__ != \"IntegrityError\":\n raise\n\n self.conn.rollback()\n\n for record in data:\n\n try:\n self.cursor.execute(self.operation, record)\n self.conn.commit()\n\n except Exception, exc:\n\n # This record is the culprit\n if exc.__class__.__name__ != \"IntegrityError\":\n logger.error(\"Exception [{}] occurred inserting record {}\".format(exc.message, record))\n logger.error(\"Operation was: {}\".format(self.operation))\n raise\n\n error_msg = str(exc.message).rstrip()\n logger.warn( \"Integrity error (\\\"{}\\\"); data={}\".format(error_msg, record) )\n\n else:\n # If all goes well, we just need a single commit\n self.conn.commit()", "def process_queue_fast(self):\n while self.queue:\n self.queue.popleft()()", "def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # 
create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())", "def _run_queries(self, queries: List[Query]) -> None:\n QUERY_TASK_LIMIT = 250\n\n while queries or self._running_queries:\n if queries:\n logger.debug(f\"Starting a new loop, {len(queries)} queries queued\")\n self._fill_query_slots(queries)\n query_tasks = self.get_running_query_tasks()[:QUERY_TASK_LIMIT]\n logger.debug(f\"Checking for results of {len(query_tasks)} query tasks\")\n for query_result in self._get_query_results(query_tasks):\n self._handle_query_result(query_result)\n time.sleep(0.5)", "def __iter__(self):\n # Trigger the consumer procs to start off.\n # We will iterate till there are no more messages available\n self.size.value = 0\n self.pause.set()\n\n while True:\n self.start.set()\n try:\n # We will block for a small while so that the consumers get\n # a chance to run and put some messages in the queue\n # TODO: This is a hack and will make the consumer block for\n # at least one second. Need to find a better way of doing this\n meta, message = self.queue.get(block=True, timeout=1)\n except Empty:\n break\n\n # Count, check and commit messages if necessary\n self.offsets[meta.partition] = message.offset + 1\n self.start.clear()\n self.count_since_commit += 1\n self._auto_commit()\n yield message\n\n self.start.clear()", "def run(self):\n self.debug('Starting new thread')\n while True:\n try:\n i, pset = self._readq.get(block=False)\n except Empty:\n break\n\n result = self.solve(pset)\n self._solveq.put((i, result))\n self.debug('Finishing thread')", "def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)", "async def _async_send_records_loop(self):\n next_cleanup = 0\n while True:\n logging.info('CachedDataWriter trying to connect to '\n + self.data_server)\n try:\n async with websockets.connect('ws://' + self.data_server) as ws:\n while True:\n try:\n record = self.send_queue.get_nowait()\n logging.debug('sending record: %s', record)\n record = {'type':'publish', 'data':record}\n await ws.send(json.dumps(record))\n response = await ws.recv()\n logging.debug('received response: %s', response)\n except asyncio.QueueEmpty:\n await asyncio.sleep(.2)\n\n except websockets.exceptions.ConnectionClosed:\n logging.warning('CachedDataWriter lost websocket connection to '\n 'data server; trying to reconnect.')\n await asyncio.sleep(0.2)\n\n # If the websocket connection failed\n except OSError as e:\n logging.warning('CachedDataWriter websocket connection to %s '\n 'failed; sleeping before trying again: %s',\n self.data_server, str(e))\n await asyncio.sleep(5)", "def run(self) -> None:\n iterable = any(hasattr(self.generator, key)\n for key in ('__iter__', '__getitem__'))\n if iterable and not self.args and not self.kwargs:\n self.__gen = 
self.generator\n else:\n self.__gen = self.generator(*self.args, **self.kwargs)\n for result in self.__gen:\n while True:\n if self.finished.is_set():\n return\n try:\n self.queue.put_nowait(result)\n except queue.Full:\n time.sleep(0.25)\n continue\n break\n # wait for queue to be emptied, then kill the thread\n while not self.finished.is_set() and not self.queue.empty():\n time.sleep(0.25)\n self.stop()", "def __iter__(self):\n # Trigger the consumer procs to start off.\n # We will iterate till there are no more messages available\n self.size.value = 0\n self.events.pause.set()\n\n while True:\n self.events.start.set()\n try:\n # We will block for a small while so that the consumers get\n # a chance to run and put some messages in the queue\n # TODO: This is a hack and will make the consumer block for\n # at least one second. Need to find a better way of doing this\n partition, message = self.queue.get(block=True, timeout=1)\n except queue.Empty:\n break\n\n # Count, check and commit messages if necessary\n self.offsets[partition] = message.offset + 1\n self.events.start.clear()\n self.count_since_commit += 1\n self._auto_commit()\n yield message\n\n self.events.start.clear()", "def consumer(self):\n while not self.stop_operation:\n try:\n candidate = self.q.get(timeout = 0.25)\n self.q.task_done()\n try:\n with tempfile.NamedTemporaryFile('w', suffix = '.smt2') as tmp:\n tmp.write(parser.render_smtlib(candidate.exprs))\n tmp.flush()\n res = checker.execute(options.args().cmd, tmp.name)\n except FileNotFoundError:\n logging.info('Removing the temporary file failed.')\n if checker.matches_reference(res):\n with self.result_lock:\n if self.result is None:\n self.stop_operation = True\n self.result = candidate\n except queue.Empty:\n if self.finished_generation:\n break\n self.__empty_queue()", "def _executor(self):\n data = None\n while self._connected:\n if self._reconnect:\n while True:\n try:\n self.connect()\n self._reconnect = False\n if data is not None:\n self._writeChar.write(data)\n break\n except btle.BTLEException as e:\n logging.getLogger(\"anki.overdrive\").error(e.message)\n self._reconnect = True\n try:\n data = self._writeQueue.get_nowait()\n self._writeChar.write(data)\n data = None\n except queue.Empty:\n try:\n self._peripheral.waitForNotifications(0.001)\n except btle.BTLEException as e:\n logging.getLogger(\"anki.overdrive\").error(e.message)\n self._reconnect = True\n except btle.BTLEException as e:\n logging.getLogger(\"anki.overdrive\").error(e.message)\n self._reconnect = True\n self._disconnect()\n self._btleSubThread = None", "def execute(self, query, data=None):\n data_from_db = None\n for _ in range(self.times_b4_data_lost):\n try:\n self.cursor = self.conn.cursor()\n logging.info(\"Try {}, {}\".format(query, data))\n if data:\n self.cursor.execute(query, data)\n else:\n self.cursor.execute(query)\n if query.startswith(\"SELECT\"):\n data_from_db = self.cursor.fetchall()\n self.cursor.close()\n except Exception as e:\n logging.exception(e)\n self.create_connection()\n else:\n break\n # TODO: check if this useful or not (cursor.close() + conn.close())\n # finally:\n # self.conn.close()\n # if self.cursor:\n # self.cursor.close()\n return data_from_db", "def run_multiple(self, sql, it):\n self.database.executemany(sql, it)", "def run(self): \n next_period = self._interval\n next_time = time()\n\n while not self.stopped.wait(next_period):\n try:\n self._execute(*self._args, **self._kwargs)\n except Exception as e:\n logging.exception(type(e).__name__ + \" is raise 
from \" + str(self._execute))\n if self._exception != False and isinstance(e, self._exception):\n break\n next_time += self._interval\n next_period = next_time - time()", "def process_queue(self):\n while self.input_processing_running:\n\n # Process everything in the queue.\n while self.input_queue.qsize() > 0:\n try:\n _telem = self.input_queue.get_nowait()\n self.process_telemetry(_telem)\n\n except Exception as e:\n self.log_error(\"Error processing telemetry dict - %s\" % str(e))\n\n # Sleep while waiting for some new data.\n time.sleep(0.5)", "def __iter__(self):\n if not self.is_alive() and not self.finished.is_set():\n self.start()\n # if there is an item in the queue, yield it, otherwise wait\n while not self.finished.is_set():\n try:\n yield self.queue.get(True, 0.25)\n except queue.Empty:\n pass\n except KeyboardInterrupt:\n self.stop()", "def _start_query(self) -> PQGen[None]:\n if self._autocommit:\n return\n\n if self.pgconn.transaction_status != TransactionStatus.IDLE:\n return\n\n yield from self._exec_command(self._get_tx_start_command())", "def run(self):\n while True:\n if not self.pool.queue.qsize() <= self.maxIdle:\n logging.info(\"Evicting Idle Connections, Queue Size: %s\" % self.pool.queue.qsize())\n connection = self.pool.queue.get(False)\n connection.dispose()\n time.sleep(self.delay/1000)", "def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)", "def execute(self):\n\n while True:\n\n neighbours, script, location = self.queue.get()\n\n if neighbours is None and script is None:\n self.queue.task_done()\n return\n\n self.run_script(neighbours, script, location)\n self.queue.task_done()", "def _thread_yield(dbapi_con, con_record):\n time.sleep(0)", "def _drain_queue(self):\n while self.queue:\n self._export_batch()", "def run(self):\n while True:\n try:\n processor, iprot, oprot, otrans, callback = self.queue.get()\n if processor is None:\n break\n callback.getContext().setProtocols(iprot, oprot)\n processor.process(iprot, oprot, callback.getContext())\n callback.success(reply=otrans.getvalue())\n except Exception:\n logging.exception(\"Exception while processing request\")\n callback.failure()", "def batch_execute(self, conn):\n def batches(data, batch_size) -> list:\n \"\"\"Return batches of length `batch_size` from any object that\n supports iteration without knowing length.\"\"\"\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv\n\n columns = ColumnCollection(self.columns)\n if self.header:\n self.columns = [columns.get(h) for h in next(self.data)]\n columns = ColumnCollection(self.columns)\n\n total = 0\n query = BulkInsertQuery(self.table, columns)\n for batch in batches(self.data, self.batch_size):\n total += query.execute(conn, batch) or 0\n yield total", "def commit(self):\n for db in self.values():\n db.commit()", "def run(self):\n self._stats.reset_results()\n for row in self._rows:\n self._process_row(row)\n self.__worker.wait()\n self._handle_if_errors()\n return self._stats._results", "def run(self):\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n # Fetch answer from task\n answer = 
next_task()\n self.task_queue.task_done()\n # Put into result queue\n self.result_queue.put(answer)\n return", "async def db_execute(self, *args, **kwargs):\n rows = []\n async with self.db_pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(*args, **kwargs)\n try:\n async for row in cur:\n rows.append(row)\n except psycopg2.ProgrammingError:\n # No results\n pass\n return rows", "def create_buffer(self, context, connection, table, *, engine):\n buffer = yield Queue()\n for row in self.commit(table, connection, buffer, force=True):\n context.send(row)", "def run(self):\n _threadpool_limits = _no_threadpool_limits\n if USE_THREADPOOL_LIMITS:\n _threadpool_limits = threadpool_limits\n\n while True:\n next_task = self.task_queue.get()\n if next_task is None:\n # Poison pill means shutdown\n self.task_queue.task_done()\n break\n with _threadpool_limits(limits=1):\n answer = next_task(self.data)\n self.task_queue.task_done()\n self.result_queue.put(answer)", "def execute(self, sql, *args, **kwgs):\n curr = self.conn.cursor()\n curr.execute(sql, *args, **kwgs)\n self.conn.commit()\n curr.close()", "def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return", "def _run(self):\n sequence = list(range(len(self.sequence)))\n self._send_sequence() # Share the initial sequence\n while True:\n if self.shuffle:\n random.shuffle(sequence)\n\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n for i in sequence:\n if self.stop_signal.is_set():\n return\n\n self.queue.put(\n executor.apply_async(get_index, (self.uid, i)), block=True)\n\n # Done with the current epoch, waiting for the final batches\n self._wait_queue()\n\n if self.stop_signal.is_set():\n # We're done\n return\n\n # Call the internal on epoch end.\n self.sequence.on_epoch_end()\n self._send_sequence() # Update the pool", "def run(self):\n LOG.debug(\"ReaderThread up and running\")\n\n lastevictTime = 0\n while ALIVE:\n for col in allLivingCollectors():\n for line in col.collect():\n self.processLine(col, line)\n now = int(time.time())\n if now - lastevictTime > self.evictinterval:\n lastevictTime = now\n now -= self.evictinterval\n for col in allCollectors():\n col.evictOldkeys(now)\n # BUGBUG : not good\n time.sleep(1)", "def _worker(self):\n while True:\n request = self.queue.get()\n self.worker(request)\n self.queue.task_done()", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def _run_queries(self, persist):\n self._mark_start()\n self._run_query()\n\n # Check if we want to keep going.\n if not persist:\n self._compile_results()\n return\n\n # Get the rest of the content.\n while not self._done():\n self._run_query()\n\n # Create the actual statements.\n self._compile_results()\n\n # This is end of the loop, one way or another. 
Restore logging if it\n # was redirected.\n request_logger.unquiet()\n util_logger.unquiet()\n return", "def run():\r\n event = threading.Event()\r\n while (event.is_set() == False):\r\n # perform database backup\r\n backup()\r\n\r\n # sleep for the predefined amount interval\r\n event.wait(BACKUP_INTERVAL)", "def run(self):\n self.initialize()\n\n self.engine = setup_db_connection(driver=\"Fake\")\n self.logger = multiprocessing.get_logger()\n self.logger.handlers[0] = setup_logging()\n\n self.logger.debug(\"\\n\\n\")\n self.logger.debug(f'Spawning Worker')\n self.logger.debug(\"\\n\\n\")\n\n self.time_start_process = time.time()\n self.time_start_cycle = time.time()\n\n # -------------------------------\n # Start Processing Data\n\n\n data_unprocessed = self.get_data_from_queue()\n\n df = pd.DataFrame()\n\n df = self.process_data(data_unprocessed)\n\n if not df.empty:\n self.insert_data_into_database(df)\n\n # -------------------------------\n\n self.check_status(\"COMPLETED\")\n return", "def __call__(self, connection, table, buffer, context, row, engine):\n\n buffer.put(row)\n\n yield from self.commit(table, connection, buffer)", "def isolate_db_query(self, query_fn, args=(), kwargs={}):\n acc_time = 0.0\n while True:\n t_start = time.time()\n try:\n ret = []\n self._isolate_db_query(query_fn, ret, args, kwargs)\n if ret:\n return ret[0]\n else:\n return\n except sqlite3.OperationalError:\n acc_time = acc_time + time.time() - t_start\n if acc_time > self.wait_timeout:\n raise\n time.sleep(sys.float_info.epsilon) # Really just need to yield.", "def worker(self):\n while True: # Feed forever. Enqueue will block when queue is full.\n while len(self.memory) < self.min_memory:\n time.sleep(1)\n batch = self.memory.sample(self.batchsize)\n states, actions, rewards, terminals = zip(*batch)\n self.session.run(self.enqueue_op, {\n self.states: states, self.actions: actions,\n self.rewards: rewards, self.terminals: terminals,\n })", "def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()", "def run(self, to_process, duplicates):\n self.db_m = database_manager.DatabaseManager(self.settings)\n try:\n # Process queue while is not empty\n while True:\n data = to_process.get(True, 1)\n duplicate_count = self.consume_data(data)\n with duplicates.get_lock():\n duplicates.value += duplicate_count\n except queue.Empty:\n pass", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def loopCommitToDatabase(self, tiltseriesdata):\n\t\treturn self.commitToDatabase(tiltseriesdata)", "def iterate(query, callback=lambda x: x, batch_size=1000, verbose=True):\n start = time.time()\n count = 0\n results = query.fetch(batch_size)\n while results:\n rstart = time.time()\n for row in results:\n output = callback(row)\n if output:\n print output\n count += 1\n if verbose:\n print '%s rows processed in %.1fs' % (count, time.time() - rstart)\n print 'total time: %.1fs' % (time.time() - start)\n results = query.with_cursor(query.cursor()).fetch(batch_size)\n callback()\n print 'total rows: %s, total time: %.1fs' % (count, time.time() 
- start)", "def execute(self):\n for coll in list(self.__bulks):\n try:\n bulkOp = self.__bulks[coll]\n curr_result = Counter(bulkOp.execute())\n self.update_results(coll, curr_result)\n except BulkWriteError as bwe:\n sys.stderr.write(str(bwe.details))", "def ExecuteMySQL(self, query):\n errorCount=0\n errorLog={}\n for x in range(0, 10):\n try:\n cnxn=mysql.connector.connect(user=self.username,password=self.pwd,\\\n host=self.server,database=self.DB)\n cursor=cnxn.cursor()\n cursor.execute(query)\n cnxn.commit()\n cursor.close()\n cnxn.close()\n str_error = None\n break\n except Exception as str_error:\n if errorCount == 9:\n print(errorLog)\n sys.exit()\n else:\n errorLog[errorCount]=str(str_error)\n errorCount+=1\n # wait for 2 seconds before trying again\n sleep(2) \n pass", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def execute_queries():\n fetch_job_listings(engine)\n update_job_listing(engine)", "def drain_results_queue(self):\n while len(self._scheduled) > 0:\n self.process_results()", "def execute(timeSync):\r\n while True:\r\n runSched(timeSync)\r\n # Syncdb()\r", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m})", "def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m, self.y:y})", "def _writeloop(self):\r\n while self._ll_alive:\r\n ## Add a thread lock\r\n if not self._uart_tx_queue.empty():\r\n data = self._uart_tx_queue.get()\r\n #clear the response list before send the command\r\n #self._uart_rx_queue.clear()\r\n #self.log.debug(\"Uart send cmd:\",data)\r\n #time.sleep(0.01)\r", "def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) 
SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise", "def __iter__(self):\n while True:\n results = self.poll()\n for x in results:\n yield x\n if not results:\n time.sleep(self.poll_delay)", "def run(self):\n print \"CAlled run in querythread\"\n #global config.mqchannel\n self.qstatus = \"Running\"\n start = time.clock()\n \n self.crawl_async_result = crawl.apply_async(args=[self.start_url, self.max_depth, self.parser], serializer=\"json\")\n while not self.crawl_async_result.ready():\n time.sleep(0)\n \n # self.crawl_async_result is a list of { URLs, links, htmls } to be parsed\n \n self.crawlstatus = \"Done\"\n self.elapsed = (time.clock() - start)\n print \"Crawl Done\"\n print json.dumps(self.crawl_async_result.result, indent=4)\n \n self.__insert_into_db(self.crawl_async_result.result)\n content = json.dumps({\"query_id\":self.qid, \"message\":\"done\", \"dbkey\":str(self.dbkey), \"time\":self.elapsed});\n config.mqchannel.basic_publish(exchange=config.get(\"MQEXCHANGE\"), routing_key='', body=content)", "def __iter__(self):\n try:\n i = self.db[self._headKey]\n while True:\n yield i\n i = self.db[self._getNextKey(i)]\n except KeyError:\n pass", "def _periodically_process_pending_rows(self):\n while self._run_process_pending_rows:\n try:\n before = time.time()\n try:\n self.process_pending_rows()\n except ResourceExhaustedError:\n _handle_resource_exhausted_error()\n continue\n except WorkbookSpaceNeededError:\n self._handle_workbook_space_needed_error()\n continue\n except Exception as ex:\n _debug_print(f\"Exception in process_pending_rows(): {ex}\")\n continue\n\n if (\n self._add_rows_time > self.max_time_per_process_loop\n or self.rows_in_active_sheet > MAX_EVENTS_TO_SPLIT_TO_NEW_SHEET\n ):\n # its taking too long to add rows to the sheet. 
Rotate\n _debug_print(\n f\"triggering rotation as the add_rows_time was: {self._add_rows_time} and rows_in_active_sheet was {self.rows_in_active_sheet}\"\n )\n self._rotate_to_new_sheet_in_workbook()\n\n after = time.time()\n time.sleep(self._calculate_periodic_loop_sleep_time(after - before))\n except Exception as ex:\n _debug_print(\n f\"Exception made it to the top of the loop in _periodically_process_pending_rows(): {traceback.format_exc()}\"\n )\n _handle_resource_exhausted_error()\n continue", "def run(self):\r\n counter = 0\r\n counter_increment = 1000 # Reporting frequency\r\n\r\n last_time = 0\r\n \r\n if get_param(\"record_queue_state\"):\r\n # Add event to query queue state.\r\n query_interval = 1\r\n report_queue_state = RecordQueueState(self.servers,\r\n self.stats_manager,\r\n query_interval)\r\n self.event_queue.put((query_interval, report_queue_state))\r\n while len(self.stats_manager.completed_jobs) < self.total_jobs:\r\n assert(not self.event_queue.empty())\r\n current_time, event = self.event_queue.get()\r\n \r\n #if current_time >= 3.0 * get_param(\"total_time\") / 4.0:\r\n # set_param(\"relative_weights\", \"1,2\")\r\n #elif current_time >= 1.0 * get_param(\"total_time\") / 2.0:\r\n # set_param(\"relative_weights\", \"1,4\")\r\n\r\n assert(current_time >= last_time)\r\n last_time = current_time\r\n\r\n if current_time > counter:\r\n counter = counter + counter_increment\r\n new_events = event.run(current_time)\r\n if new_events:\r\n for new_event in new_events:\r\n self.event_queue.put(new_event)\r\n \r\n self.stats_manager.output_stats()\r\n \r\n output_params()" ]
[ "0.7164139", "0.6617921", "0.6452826", "0.6395609", "0.6328278", "0.62974733", "0.62549317", "0.6231184", "0.616941", "0.60640925", "0.6058464", "0.5961299", "0.588708", "0.5882663", "0.5875031", "0.5870313", "0.5865822", "0.5852583", "0.5845655", "0.5839005", "0.5820537", "0.58019185", "0.5799526", "0.5791952", "0.57594746", "0.57514244", "0.5749388", "0.57383335", "0.57342356", "0.57119244", "0.5709281", "0.56923157", "0.56579345", "0.5632925", "0.5630789", "0.56300026", "0.55977964", "0.5590822", "0.55857736", "0.558084", "0.557299", "0.55586594", "0.5556598", "0.5555248", "0.55460346", "0.55434024", "0.5532476", "0.5516133", "0.55140615", "0.5495657", "0.5480338", "0.54786813", "0.5456684", "0.54468215", "0.5432785", "0.54310197", "0.54278487", "0.54255795", "0.54172343", "0.5409947", "0.54058605", "0.5393181", "0.5375792", "0.53747684", "0.53689873", "0.5368789", "0.53668845", "0.53603077", "0.5358582", "0.5357813", "0.53547984", "0.5346979", "0.53452086", "0.5344912", "0.5340846", "0.5334569", "0.53329444", "0.5332493", "0.532629", "0.5316225", "0.5315755", "0.5315153", "0.53135574", "0.5310858", "0.5307627", "0.5306303", "0.53028184", "0.529887", "0.5298679", "0.5292205", "0.5289975", "0.5288455", "0.52815586", "0.52815366", "0.5279804", "0.5278255", "0.52767557", "0.527571", "0.52747047", "0.5265876" ]
0.7875983
0
Close down the thread and close the sqlite3 database file.
def close(self):
    self.exit_set = True
    self.sql_queue.put((self.exit_token, "", ""), timeout=5)
    # Sleep and check that the thread is done before returning.
    while self.thread_running:
        time.sleep(.01)  # Don't kill the CPU waiting.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_db(connection: sqlite3.Connection):\n connection.commit()\n connection.close()", "def close_db(self):\n with nostderr():\n tb.file._open_files.close_all()", "def close(self):\n\t\tself.db.close()", "def cleanup(_):\n self.db.close()", "def close(self):\n if getattr(self, \"_db\", None):\n self._db.close()\n self._db = None", "def close_db(nothing):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n self._db.close()\n self._db = None", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n self._db.close()\n self._db = None", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n self._db.close()\n self._db = None", "def close(self):\n self.db.close()", "def _close_database(self):\n assert self._conn is not None\n logging.info(\"Closing file {!r}.\".format(self._filename))\n self._cursor.close()\n self._cursor = None\n self._conn.close()\n self._conn = None\n self._filename = None\n self._sessionUID = None", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "async def close(self):\n print('Close {}'.format(str(self.__hash__)))\n await self.db.close()", "def tteardown_class(self):\n os.close(self.db_fd)\n os.unlink(app.config['DATABASE'])", "def close_db_connection(cls):\n db.close()", "def close_db(error):\n if hasattr(g, \"sqlite_db\"):\n g.sqlite_db.close()", "def close_db(error):\r\n if hasattr(g, 'sqlite_db'):\r\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()", "def close(self):\n self.__database__.close()", "def close_db():\n if ( not g.get( 'db' ) is None ):\n g.db.dispose()\n\n g.db = None", "def close_db(error):\n\tif hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db.close()", "def close_db(error):\n\tif hasattr(g, 'sqlite_db'):\n\t\tg.sqlite_db.close()", "def close_db():\n if \"db\" in context:\n context[\"db\"].close()", "def close(self):\n self.clean()\n\n for conn in self.conn__.values():\n try:\n conn.commit()\n conn.close()\n except sqlite3.ProgrammingError:\n pass", "def close_db(error):\n\n if hasattr(g, \"sqlite_db\"):\n g.sqlite_db.close()", "def close(db):\n storage.close()", "def finalise(self):\n log.info(\"KenwoodDatabase finalised.\")\n self.db_file.close()", "def close_database(exception):\n top = 
_app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()", "def close_database(exception):\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()", "def close_database(exception):\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()", "def tearDown(self):\n self.db.close()\n self.dbfile.close()\n os.unlink(self.path)", "def close(self):\n if getattr(self, \"_db\", None) is not None:\n if not self._db_args['autocommit']:\n self._db.commit()\n self._db.close()\n self._db = None", "def close_database(exception):\n # top = _app_ctx_stack.top\n # if hasattr(top, 'sqlite_db'):\n # top.sqlite_db.close()\n client.close()", "def close_db(error):\n conn = g.get('sqlite_db', None)\n if conn is not None:\n conn.close()", "def close(self):\n# self.cursor.close()\n\tself.db.close()", "def disconnect(self):\n self.db.close()", "def disconnect(self):\n if self.db:\n try:\n self.db.close()\n finally:\n self.db = None", "def close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n if error is not None:\n print( \"An error has been produced.\")", "def close(self):\n self.db.commit()\n self.db.close()", "def close_db_connection(exception):\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()", "def close(self):\n self._flush()\n self.database.close()", "def close(error):\n if hasattr(flask.g, 'sqlite_db'):\n flask.g.sqlite_db.close()", "def teardown(self):\n try:\n self._close(True)\n except:\n pass\n try:\n os.unlink(os.path.join(self.home_dir, DB_FILE))\n except FileNotFoundError as _:\n pass", "def close(self):\n if self._dbopen:\n self._dbcon.close()", "def shutdown(self) -> None:\n with self.table_access_condition:\n curr_thread = threading.get_ident()\n self.conn[curr_thread].close()\n del self.conn[curr_thread]", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(closet.app.config['DATABASE'])", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(mainPyUnit.app.config['DATABASE'])", "def close(self):\n self.cursor.close()\n self.db.close()", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(pegasus.app.config['DATABASE'])", "def shutdown_db():\n print('Exit.')\n global conn\n conn.close()", "def db_close(e=None):\n database = g.pop('db', None)\n\n if database:\n database.close()", "def teardown_db(exception):\n storage.close()", "def teardown_db(exception):\n storage.close()", "def teardown_db(exception):\n storage.close()", "def teardown_db(exception):\n storage.close()", "def Close(self):\n l_logger.debug(\"Closing database connection\")\n self.db.close()", "def __del__(self):\n\t\t# self.close() \n\t\tself.connected = False\n\t\tprint('db close in destructor')", "def close(self):\n\t\tif self.connected:\n#\t\t\t\t self.__db.close()\n\t\t\tself.__engine.dispose()\n\t\t\tself.connected = False", "def close(self):\n self.db.remove()\n self.engine.dispose()", "def close_database(db):\n global DB_CONNECTIONS\n con = DB_CONNECTIONS.get(db, None)\n if con is not None:\n db_name = Path(db).name\n logging.info(f\">>> Closing database `{db_name}` <<<\")\n # Closing will delete the connection. 
An in-memory DB will lose all data permanently.\n # See https://stackoverflow.com/questions/48732439/deleting-a-database-file-in-memory\n con.close()\n DB_CONNECTIONS.pop(db, None)", "def close_db_connection(exception):\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()", "def close_db_connection(exception):\n top = _app_ctx_stack.top\n if hasattr(top, 'sqlite_db'):\n top.sqlite_db.close()", "async def on_shutdown(self):\n await self.controller.database.close()", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(app.app.config['DATABASE'])", "def close_db(error):\n storage.close();", "def close_db(error):\n storage.close()", "def cleanup(self) -> None:\n\n self._update_thread.requestInterruption()\n self._update_thread.schedule_update_event.set()\n if self._update_thread.wait(1) is False:\n logging.info(\"not enough time to stop thread 0.5\")\n self._save_feeds()\n self._sqlite_connection.close()", "def close(self):\n if self.data is not None:\n self.writeDataToFile()\n if self.wantAnyDbm:\n self.data.close()\n taskMgr.remove('%s-syncTask'%(self.className,))\n self.data = None", "def after_request():\n db.close()", "def teardown_db(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()", "def teardown_db(e):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()", "def close_db_while_running(self):\n self.close()\n try:\n yield\n finally:\n self.dismod_file.engine = get_engine(self._filename)", "def close_database(self):\n if self._conn is not None:\n self._conn.close()\n self._conn = None", "def close_database(self) -> None:\n self.api = None\n self.base = None\n self.table = None", "def disconnect():\r\n try:\r\n global connection\r\n global cursor\r\n\r\n connection.close()\r\n logger.info(\"The SQLite connection is closed\")\r\n\r\n except sqlite3.Error as error:\r\n logger.error(f\"Error while connecting to sqlite: {error}\")", "def closedb(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()", "def close_db(e=None):\n db = g.pop(\"db\", None)\n\n if db is not None:\n db.close()", "def close_db(e=None):\n db = g.pop(\"db\", None)\n\n if db is not None:\n db.close()", "def tearDown(self):\n DBSession.close()\n daemons.execute_in_thread('radicale', lambda: transaction.commit())\n teardown_db()\n transaction.commit()\n DBSession.close_all()\n config['tg.app_globals'].sa_engine.dispose()", "def close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()", "def close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()", "def close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()", "def cleanup():\n if connection:\n try:\n if not connection.closed:\n connection.close()\n except:\n result.note_exception(cause=\"Exception raised while closing database.\")\n result[\"db_unable_to_close\"] = dsn\n if cleanup_db:\n params = {'dsn':cleanup_db,'user':self.user_name.encode('ascii'),'password':self.user_password.encode('ascii')}\n try:\n c = fdb.connect(**params)\n c.drop_database()\n except:\n result.note_exception(cause=\"Test cleanup: Exception raised while dropping database.\")\n return", "def close_db(e=None) -> None:\r\n db = g.pop('db', None)\r\n if db is not None:\r\n db.close()" ]
[ "0.72728205", "0.726023", "0.71503633", "0.71176696", "0.7114434", "0.7111585", "0.7087267", "0.7087267", "0.7087267", "0.7081732", "0.70814943", "0.6973683", "0.69685507", "0.69284976", "0.68923795", "0.6879302", "0.6870084", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68667877", "0.68635607", "0.6861828", "0.68560016", "0.68560016", "0.6838204", "0.68269384", "0.67945313", "0.67875606", "0.67654324", "0.67467177", "0.67467177", "0.67467177", "0.6744823", "0.66729444", "0.6672302", "0.66722566", "0.6668937", "0.6667504", "0.6665834", "0.663746", "0.6637364", "0.66295725", "0.6619337", "0.66155267", "0.6601742", "0.660099", "0.65941274", "0.6591877", "0.6586709", "0.6575399", "0.6567046", "0.6556869", "0.65546817", "0.65442514", "0.65442514", "0.65442514", "0.65442514", "0.65411955", "0.65368336", "0.6527197", "0.6526376", "0.6520002", "0.65156263", "0.65156263", "0.65142864", "0.6509508", "0.64752936", "0.64571726", "0.6440524", "0.6433402", "0.6431724", "0.64223665", "0.6421442", "0.641357", "0.64080185", "0.63826334", "0.63762885", "0.63735217", "0.6354767", "0.6354767", "0.6339264", "0.6336372", "0.6336372", "0.6336372", "0.6334355", "0.6323096" ]
0.6315703
100
Return the queue size.
def queue_size(self):
    return self.sql_queue.qsize()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getqueuesize(self):\n return self._queuesize", "def queue_size(self) -> int:\n return self._queue.qsize()", "def queue_size(self):\n return self._queue_size", "def qsize(self):\r\n return len(self._queue)", "def queue_size(self):\n # pylint: disable=protected-access\n if self._handler._received_messages:\n return self._handler._received_messages.qsize()\n return 0", "def qsize(self) -> int:\n return len(self._queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.queue)", "def size(self):\n return len(self.__queue)", "def qsize(self) -> int:\n return self._queue.qsize()", "def queue_size(self) -> ConfigNodePropertyInteger:\n return self._queue_size", "def size(self):\r\n return len(self.queue)", "def size(self):\r\n return len(self.queue)", "def get_message_queue_size(self):\n with self.lock:\n return len(self.message_queue)", "def size(self):\n return len(self._queue_items)", "def qsize(self):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n try:\n size = self.__db.llen(self._key)\n except redis.ConnectionError as e:\n raise redis.ConnectionError(repr(e))\n return size", "def queue_size(self):\n return len(self.groups)", "def ctrlqueue_queue_size(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(10), ctypes.c_int32(0))", "def size(self):\n return self.url_queue.qsize()", "def queue_count(self):\n with self.mutex:\n return len(self.queues)", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def Length(self):\n return len(self.jobQueue)", "def qsize(self):\n return self.q_size.current_value", "def get_queue_size(queue):\n with current_celery_app.pool.acquire(block=True) as connection:\n bound_queue = queue.bind(connection)\n _, size, _ = bound_queue.queue_declare(passive=True)\n return size", "def __len__(self):\n\n return len(self._queue)", "def record_queue_size():\n statsd.gauge('rabbitmq.size', rabbitmq_queue_size())", "def getQueueCount(self, force=False):\n if self.queuecount == -1 or force:\n self.queuecount = self.db.queuecount()\n\n return self.queuecount", "def __len__(self) -> int:\n return len(self._data_queue)", "def get_size(self, node: int):\n return self._servers[node].size + self._queues[node].size", "def api_get_queue_len():\n try:\n ret = AppStatus.check_manager_status(brief=False)\n if ret is not None:\n return jsonify({\"total_queue_len\": ret.get(\"total_queue_len\", 0)})\n except Exception as e:\n logger.error(\"Traceback:\\n%s\", traceback.format_exc())\n abort(500, \"failed to send message or invalid manager response\")", "def get_size(self) -> int:\n return self.__size", "def qsize(self): \n return self.__db.llen(self.key)", "def get_Q_size(self):\n return len(self.qTable)", "def _queue_len(queue_path: str) -> int:\n if not os.path.exists(queue_path):\n return 0\n return len([path for path in os.listdir(queue_path)])", "def get_amount_queued(self):\n return self._num_queued", "def qsize(self) -> int:\n pass", "def __len__(self):\n\n return len(self._block_queue)", "def len(self) -> int:\n\n return int(self.q)", "def size(self):\n if self.is_free():\n return self._size\n else:\n return self._size * -1", "def size():\r\n qry = ImportQueue.query.filter(or_(\r\n ImportQueue.status != COMPLETE,\r\n ImportQueue.status != ERROR))\r\n return qry.count()", "def count_remaining(self):\n\t\treturn self._queue.qsize()", "def 
count_remaining(self):\n\t\treturn self._queue.qsize()", "def size(self): #returns the size or number of items in the stack\n if self.is_empty():\n return 0\n else:\n return self.num_items", "def get_size(self):\n return self.__size", "def get_size(self):\n return self.__size", "def getsize(self):\n return self.__size", "def size(self) -> int:\n return self._size", "def size(self) -> int:\n return self._size", "def size(self) -> int:\n return self._size", "def size(self) -> int:\n return self._size", "def size(self) -> int:\n return self._size", "def get_size(self):\r\n return self.__size", "def get_size(self):\r\n\r\n return self._size", "def queue_message_count(self, queue_name):\n queue_list = self.__session.getObjects(_class=\"queue\", _name=queue_name)\n if len(queue_list):\n return queue_list[0].msgDepth", "def get_size(self):\n return len(self.get_payload()) + 4", "def __get_size(self):\n return self.__size", "def get_size(self):\n return self._size", "def get_size(self):\n return self._size", "async def size(self) -> int:", "def size(self):\r\n if self.full():\r\n return self.capacity()\r\n else:\r\n size = self._read_index - self._write_index\r\n if size < 0:\r\n return self.capacity() + size # wrap around\r\n else:\r\n return size", "def size(self): \n return self.qSize", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size", "def get_size(self):\r\n return self._size", "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def size(self):\n\t\treturn self._size", "def get_size(self):\n return self.__size", "def size(self):\r\n return self._size", "def size(self):\n\t\treturn self._count", "def get_heap_size(self):\r\n return self.size", "def getsize(self):\n try :\n return self.size\n except:\n raise ReferenceError", "def size(self):\n return self._size", "def size(self):\n return len(self.heap)", "def size(self):\n return len(self.heap)", "def size(self):\n return len(self.heap)", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\r\n return self._size", "def size(self) -> int:\n _args: list[Arg] = []\n _ctx = self._select(\"size\", _args)\n return _ctx.execute_sync(int)", "def getSize(self):\r\n list = self.getList()\r\n return len(list)", "def getSize(self):\n return self.__size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size" ]
[ "0.92133445", "0.9186504", "0.91504335", "0.8877782", "0.8795246", "0.87893367", "0.8775905", "0.8775905", "0.8775905", "0.8775905", "0.8757166", "0.8754371", "0.8695905", "0.8690609", "0.8690609", "0.8606325", "0.85473734", "0.8437442", "0.8317189", "0.82109904", "0.81546557", "0.8149988", "0.81052965", "0.79456085", "0.78452873", "0.78255314", "0.78071856", "0.7685579", "0.7668705", "0.7647467", "0.754643", "0.75016606", "0.7489479", "0.7479997", "0.74768454", "0.74394053", "0.74317116", "0.74254787", "0.74250776", "0.740157", "0.73952395", "0.730126", "0.72978187", "0.72978187", "0.7288237", "0.7233786", "0.7233786", "0.7231953", "0.7207921", "0.7207921", "0.7207921", "0.7207921", "0.7207921", "0.71987927", "0.71830165", "0.7170912", "0.7169699", "0.71595025", "0.7157663", "0.7157663", "0.71535826", "0.7149804", "0.7148235", "0.714033", "0.714033", "0.714033", "0.7139891", "0.71369916", "0.71365124", "0.7109048", "0.70945907", "0.7074451", "0.706637", "0.70477307", "0.70438486", "0.7037809", "0.7037809", "0.7037809", "0.70332485", "0.70332485", "0.70332485", "0.70332485", "0.70332485", "0.7030874", "0.7029615", "0.7025303", "0.7020963", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654", "0.70193654" ]
0.84914595
17
Get the query results for a specific token.
def query_results(self, token):
    delay = .001
    while True:
        if token in self.results:
            return_val = self.results[token]
            del self.results[token]
            return return_val
        # Double back on the delay to a max of 8 seconds. This prevents
        # a long lived select statement from trashing the CPU with this
        # infinite loop as it's waiting for the query results.
        logging.debug("Sleeping: %s %s", delay, token)
        time.sleep(delay)
        if delay < 8:
            delay += delay
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listSearches(self, authenticationToken):\r\n pass", "async def _perform_get_results(self, login_token, result_token):\n data = {\"resultSetToken\": result_token, \"token\": login_token}\n return await self._perform_request(\"get-results\", data, lambda r: r.json())", "def get_query_results(QueryExecutionId=None, NextToken=None, MaxResults=None):\n pass", "def retrieve_results(self, token: str = '', measurement_id: str = ''):\n with open(self.config_file) as json_file:\n data = json.load(json_file)\n if token == '':\n token = data[self.server][self.license_key][self.user.email][\"user_token\"]\n\n if token == '':\n raise ValueError(\"No user token provided. Please log in.\")\n\n if not measurement_id or measurement_id == '':\n res = self.measurement.retrieve()\n else:\n res = self.measurement.retrieve(measurement_id=measurement_id)\n return res", "def getSearch(self, authenticationToken, guid):\r\n pass", "def listSearches(self, authenticationToken):\r\n self.send_listSearches(authenticationToken)\r\n return self.recv_listSearches()", "def getSearch(self, authenticationToken, guid):\r\n self.send_getSearch(authenticationToken, guid)\r\n return self.recv_getSearch()", "def query_results(self):\n return self.details[KEY_QUERY_RESULTS]", "def query(self):\n query_url = self.get_query_url()\n logging.info('Querying: ' + query_url)\n json_data = request.urlopen(query_url).read().decode()\n logging.debug('Retrieved the following ' + json_data)\n response = json.loads(json_data)\n\n return self.get_docs_from_response(response)", "def search(token, query):\n\n url = util.get_url() + f\"drive/root/search(q='{query}')\"\n response = util.rest(\"GET\", url, token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text), response.status_code", "def search_v1(query_tokens, inverted_index):\n return []", "def psirt_query(token):\n url = 'https://api.cisco.com/security/advisories/cvrf/latest/10'\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer ' + token,\n }\n last_10_vulns = requests.get(url, headers=headers)\n logger.info('query response code = ' + str(last_10_vulns.status_code))\n logger.debug(last_10_vulns)", "def list_recommendations_by_next_token(self, token):\n return self.list_recommendations(next_token=token)", "def get(self, token):\n args = (token, )\n row = self.db_manager.execute_sql_and_fetchone(SQL_TOKEN_GET, args)\n if row:\n token_object = convert_db_row_to_dict(row, TOKEN_MODEL_FIELDS)\n else:\n token_object = {}\n return token_object", "async def _perform_query(self, query, login_token):\n data = {\"username\": self.user, \"query\": query.lower(), \"token\": login_token}\n return await self._perform_request(\"query\", data, lambda r: r.text())", "def getResults():", "def get(self, token):\n try:\n result = self.table.get_item(Key={\"token\": token})\n return result[\"Item\"]\n except Exception as e:\n raise self.RedirectDoesNotExist(f\"token: {token}; exception: {str(e)}\")", "def token_resources(token):\n\n headers = {\n 'accept': 'application/json',\n }\n\n url = 'https://reactome.org/AnalysisService/token/%s/resources' % token\n\n try:\n response = requests.get(url=url, headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.json()\n else:\n print('Status code returned a value of %s' % response.status_code)", "def multiQuery(self, query, limit):\n try:\n results = self.sp.search(query, limit)\n resultLists = results['tracks']['items']\n 
return resultLists\n except spotipy.SpotifyException as se:\n self.authenticate()\n return self.multiQuery(query, limit)", "def search(token, query):\n format_query = query.replace(\" \", \"%20\")\n url = 'https://api.thetvdb.com/search/series?name=' + format_query\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text)\n show_list = json_data.get('data')\n for show in show_list:\n if show.get('status') == 'Continuing':\n show_id = show.get('id')\n s = create_show(token, show_id)\n return s", "def get_results(self):\n return self.results", "def get_results(self):\n return self.results", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def get(self, url, token=None):\n return self.app.get(url,\n headers=_token_header(token))", "def _get_fresh_results(session, query_id, result_id):\n response = session.get('{}/api/queries/{}/results/{}.json'.format(REDASH_HOST, query_id, result_id))\n return response", "def get_results(self):\n\n return self.results_", "def get_results(self):\n return self._do_action_under_lock(self._get_all_results)", "def get_user_messages_by_token(token):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n return get_user_messages_by_email(token, session['data']['user'])", "def get(self, url, token=None):\n headers = {}\n if token:\n headers = token_header(token)\n return self.app.get(url, headers=headers)", "def request(self, token):\n pass", "def GetResults(self):\n return self._results", "def get_user_data_by_token(token, include_hash=False):\n session = get_session_by_token(token)\n if not session['success']:\n return session\n\n return get_user_data_by_email(token, session['data']['user'], include_hash)", "def get_results_for(t_client, search_q):\n results = t_client.search(q=\"#\"+search_q)\n\n # This can be refactored\n return [\n {\n \"author\": \"@%s\" % t.from_user,\n \"text\": t.text,\n \"id\": t.id,\n \"date_h\": t.created_at.strftime(\"%H:%M:%S %d/%m/%Y\"),\n \"date\": time.mktime(t.created_at.timetuple()),\n } for t in results\n ]", "def request_data(self, search_query=None, app_index=0):\n tweet_obj_fields = utils.tweet_object_fields()\n tweet_fields = ','.join(tweet_obj_fields[\"twitter_fields\"])\n params = {'query': search_query, \n 'tweet.fields': tweet_fields}\n\n if search_query is None:\n raise AttributeError(\"No query parsed.\")\n\n base_url = \"https://api.twitter.com/2/tweets/search/recent?\"\n headers = self.get_bearer_header(app_index)\n response = requests.get(base_url, headers=headers, params=params)\n return response", "def get_project_list(token):\n session = requests.Session()\n session.headers.update({'Authorization': f'Token {token}'})\n url = get_project_list_url()\n r = session.get(url=url)\n return r", "def _run_async_query(self, context):\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n results = result.get(\"results\", [])\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def _run_async_query(self, context):\n url = self._build_url(\"/_search\")\n request = self._build_request(0, -1)\n resp = self._cb.post_object(url, body=request)\n result = resp.json()\n self._total_results = result[\"num_found\"]\n self._count_valid = True\n results = 
result.get(\"results\", [])\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def get_results(self):\n return self.result", "def list_named_queries(NextToken=None, MaxResults=None):\n pass", "def search(self, mode=None, **kwargs):\n\n url = self.base + \"search{}.json\".format(\"\" if mode is None else \"/\" + mode)\n if self.token != \"\":\n kwargs.update({\"access_token\": self.token})\n result = requests.get(url, params=kwargs)\n return(result)", "def _query_accessToken(self, APIToken):\n print('WaPOR API: _query_accessToken')\n\n base_url = '{0}'\n request_url = base_url.format(\n self.path['sign_in'])\n\n if self.print_job:\n print(request_url)\n\n request_headers = {\n 'X-GISMGR-API-KEY': APIToken}\n\n # requests\n try:\n resq = requests.post(\n request_url,\n headers=request_headers)\n # resq.raise_for_status()\n except requests.exceptions.HTTPError as err:\n raise Exception(\"WaPOR API Http Error: {e}\".format(e=err))\n except requests.exceptions.ConnectionError as err:\n raise Exception(\"WaPOR API Error Connecting: {e}\".format(e=err))\n except requests.exceptions.Timeout as err:\n raise Exception(\"WaPOR API Timeout Error: {e}\".format(e=err))\n except requests.exceptions.RequestException as err:\n raise Exception(\"WaPOR API OOps: Something Else {e}\".format(e=err))\n else:\n resq_json = resq.json()\n try:\n resp = resq_json['response']\n # print(resp)\n\n if resq_json['message'] == 'OK':\n return resp\n else:\n print(resq_json['message'])\n except BaseException:\n print('WaPOR API ERROR: APIToken \"{v}\"'.format(\n v=APIToken))\n print('WaPOR API ERROR: Cannot get {url}'.format(\n url=request_url))", "def _single_fetch(self, to_skip: int) -> set:\r\n all_tokens = []\r\n query = gql_queries.generate_query_all_tokens(to_skip)\r\n \r\n r = requests.post(\r\n self.graph_node_url,\r\n json = {\"query\": query},\r\n headers = {\"Content-Type\": \"application/json\"}\r\n )\r\n \r\n if r.status_code == 200:\r\n for t in r.json()[\"data\"][\"tokens\"]:\r\n all_tokens.append(\r\n Token(\r\n t[\"name\"],\r\n t[\"symbol\"],\r\n self.w3.toChecksumAddress(t[\"id\"]),\r\n t[\"decimals\"]\r\n ).__dict__() # Hacky way to bypass the use of Token for now\r\n )\r\n return all_tokens\r\n else:\r\n return []", "async def get_results(self):\n try:\n return await self._get_gist_data(comm_type='results')\n except Exception:\n self.log.debug('Retrieving results over c2 (%s) failed!' 
% self.__class__.__name__)\n return []", "def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }", "def _get(self, query=None):\n if not query:\n ticket_data = DB_TICKET_TABLE.all()\n else:\n ticket_data = DB_TICKET_TABLE.search(query)\n\n res = {\n \"total_queried\" : len(ticket_data),\n \"_embedded\" : {\n \"tickets\" : self.embed_ticket_data_in_result(ticket_data)\n },\n \"_links\" : self.make_links({\n \"self\" : TicketList.get_self_url(),\n \"contained_in\" : Root.get_self_url()\n })\n }\n return res", "def query(self, queries):\n final_result = []\n results = self.__return_query('query', queries)\n if results == None:\n return None\n else:\n if len(results) > 1:\n for result in results:\n final_result.append(result['data'])\n else:\n final_result = results\n return final_result", "def list(self):\n return self.results_list", "def get_list_of_results(self):\n return self.__result_list", "def callQuery(self, query, numResults=1):\n try:\n if numResults < 1:\n raise Exception(f\"Invalid numResults: {numResults}\")\n\n if numResults == 1:\n return self.singleQuery(query)\n\n else:\n return self.multiQuery(query, limit=numResults)\n\n except IndexError:\n print(\"No results.\")\n except spotipy.SpotifyException:\n print(\"Session expired. 
Re-authenticating.\")\n self.authenticate()", "def results(self) -> List[str]:\n if self.get_from_redis(\"results\") != \"None\":\n return json.loads(self.redis_client.hget(self.root_path, \"results\"))\n return list()", "def _api_get(self, query):\n if not isinstance(query, list):\n query = [query]\n\n req = list()\n with requests.Session() as session:\n for q in query:\n r = session.get(self._url + q, headers={'Content-Type': 'application/json',\n 'Accept': 'application/json'}, auth=self._auth,\n timeout=self._request_timeout_secs)\n r.raise_for_status()\n req.append(r)\n return req", "def get_results(query):\n user_agent = \"WDQS-example Python/%s.%s\" % (sys.version_info[0], sys.version_info[1])\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\", agent=user_agent)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n return sparql.query().convert()", "async def fetch_query(session, metric, provider, weight):\n provider = Provider(session=session, metrics_provider=provider)\n value = await provider.query(metric)\n return QueryResult(metric=metric, value=normalize(metric, value), weight=weight)", "def _run_async_query(self, context):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n self._total_results = len(results)\n self._count_valid = True\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def results(self, period_start=None, period_end=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/results'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def query(self, query, limit=10):\n try:\n rows, word_ids = self.get_match_rows(query)\n except OperationalError:\n print(\"No documents found for \\\"{}\\\"\".format(query))\n return\n\n scores = self.get_scored_list(rows, word_ids)\n\n ranked_scores = sorted([(score, url) for (url, score) in scores.items()], reverse=1)\n for (score, urlid) in ranked_scores[0: limit]:\n print(\"{}\\t{}\".format(score, self.get_url_name(urlid)))\n\n return word_ids, [r[1] for r in ranked_scores]", "def get_results(self, table=None, query=None):\n def determine_query():\n \"\"\"\n Determines the query by checking if a query arg was passed.\n \"\"\"\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"\n\n def __to_list(__results):\n \"\"\"\n Converts DB results to a (persistent) list.\n \"\"\"\n rows = []\n for row in __results:\n rows.append(row)\n \n __results.close()\n\n return rows\n\n\n # Determine query and execute\n results = __to_list(self.execute_query(determine_query()))\n\n return results", "def api_query(self, **kwargs):\n with self._api_lock:\n return self._api_query(kwargs)", "def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count 
submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })", "def results(self):\n return self._result_list", "def __get_results(self, query):\n return self.mysql.query_multi_with_fetchall_as_dict(query)", "def getResults(self) -> Tuple[str, Results]:\n\n return self.moss_results", "def query(self, command, lang, topk, field='context'):\n if field not in ['context', 'context_title']:\n raise ValueError(f\"Cannot search by {field}!\")\n if lang not in self.searcher:\n raise RuntimeError(f\"Language '{lang}' not added\")\n\n esccommand = self.parser_context[lang].escape(command)\n if field == 'context':\n query = self.parser_context[lang].parse(esccommand)\n else:\n # query = self.parser_multi[lang].parse(esccommand)\n query = MultiFieldQueryParser.parse(\n esccommand,\n ['title', 'context'],\n [BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD],\n self.analyzer[lang])\n scoreDocs = self.searcher[lang].search(query, topk).scoreDocs\n\n docs = []\n for scoreDoc in scoreDocs:\n doc = self.getDoc(scoreDoc, lang)\n docs.append(Namespace(score=scoreDoc.score, id=int(doc.get('id')), doc=doc, lang=lang))\n return docs", "def perform_request(endpoint, token) -> dict:\n return requests.get(endpoint, headers={\"Authorization\": \"Bearer \"+token[\"access_token\"]}).json()", "def query():\n data = {'version': config.API_VERSION}\n args = flask.request.args\n limit = args.get('limit', config.DEFAULT_QUERY_LIMIT)\n offset = args.get('offset', 0)\n q = args.get('q', '')\n table = args.get('table')\n filter_params = {'filter': args.get('filter')}\n try:\n total, result = db_client.search(table, q,\n limit, offset,\n **filter_params)\n data['result_count'] = total\n data['results'] = result\n except db.InvalidTable:\n data['error'] = 'Invalid table:'+str(table)\n\n return flask.jsonify(data)", "def query_tickets(self):\n return self._call_txtrader_api('query_tickets', {})", "def results():\n\n queryName = request.form['query']\n queryStars = request.form['stars']\n \n datasource = DataSource()\n listOfRestaurantNames = datasource.searchRestaurantsByNameAndMinimumStars(queryName, queryStars)\n restaurants = datasource.generateRestaurantObjects(listOfRestaurantNames[:15])\n\n return render_template('results.html', restaurants=restaurants)", "def queries(self):\n request = Request(method=\"get\", endpoint=\"/query/current\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)", "def results(self):\n\n 
return self._search_resut", "def test_get_report_request_list_by_next_token(self, api_instance: Reports):\n params = api_instance.get_report_request_list(next_token=\"RXmLZ2bEgE\")\n self.assert_common_params(params, action=\"GetReportRequestListByNextToken\")\n assert params[\"NextToken\"] == \"RXmLZ2bEgE\"", "def get_all_users():\n token = request.headers.get('token')\n\n # Token Validation\n token_valid, response = is_token_valid(token)\n if not token_valid:\n return response\n token_username = response\n\n # Privilege handling\n if token_username != 'admin':\n return jsonify({'message': \"You aren't allowed to access this\"}), 404\n\n return jsonify(list(Users.values())), 200", "def getResults(self):\n return self.Results", "def search(api_token, base_url, search_params):\n\n request_url = f\"{base_url}data_files/api_search\"\n request_data = search_params\n\n # Add Auth/API token to request_data\n request_data['auth_token'] = api_token\n\n # -- Set up the http request and handle the returned response\n data = urllib.parse.urlencode(request_data, True)\n data = data.encode('ascii')\n req = urllib.request.Request(request_url, data)\n with urllib.request.urlopen(req) as response:\n the_page = response.read()\n\n encoding = response.info().get_content_charset('utf-8')\n records = json.loads(the_page.decode(encoding))\n\n return records", "def get(self, filters=None, pagination=None, sort=None):\n filters = filters or {}\n if not is_user_action_allowed('manage_others_tokens'):\n filters['_user_fk'] = current_user.id\n\n sm = get_storage_manager()\n\n result = sm.list(models.Token, filters=filters,\n pagination=pagination, sort=sort)\n\n return result", "def fetch_doc_tokens(self, document_level, find_query_mixin={}):\n if document_level != 'postwise':\n raise NotImplementedError('document_level:%s' % document_level)\n\n query = {'subreddit':self.subreddit, document_level:{'$exists':True}}\n query.update(find_query_mixin)\n\n for doc in self.posts_read.find(query):\n try:\n yield doc[document_level]['tokens']\n except KeyError:\n # XXX: this shouldn't happen...\n print 'woop, doc missing %s.tokens' % document_level", "def _perform_query(self, from_row=0, max_rows=-1):\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n for item in results:\n yield self._doc_class(self._cb, item[\"id\"], item)", "def test_get_report_list_by_next_token(self, api_instance: Reports):\n params = api_instance.get_report_list(next_token=\"5u6Of2fS8B\")\n self.assert_common_params(params, action=\"GetReportListByNextToken\")\n assert params[\"NextToken\"] == \"5u6Of2fS8B\"", "def get_data_of_token_holder(token):\n response = requests.get(\n f'{GITHUB_API_URL}/user',\n headers={\n 'Accept': 'application/vnd.github.v3+json',\n 'Authorization': f'token {token}',\n },\n )\n response.raise_for_status()\n return response.json()", "def get(self):\n global hits\n return {\"hits\": hits}, 200", "def run_graphql(query: str, token: str):\n response = requests.post(\n 'https://api.github.com/graphql',\n json={'query': query},\n headers={'Authorization': 'Bearer ' + token})\n response.raise_for_status()\n return response.json()", "def get_from_token(token, session):\n return session.query(User).filter(User.access_token == token).first()", "def execute(self):\n return SLAResults(self.execute_votable(), self.getqueryurl())", "def execute(self):\n return 
SLAResults(self.execute_votable(), self.getqueryurl())", "def get_data(self):\n return self._results", "def test_get_report_request_list_by_next_token_alias(self, api_instance: Reports):\n params = api_instance.get_report_request_list_by_next_token(\"0hytxbkaOb\")\n self.assert_common_params(params, action=\"GetReportRequestListByNextToken\")\n assert params[\"NextToken\"] == \"0hytxbkaOb\"", "def get(self, search_query, page_number):\n with self.get_conn() as conn:\n c = conn.cursor()\n query = \"\"\"SELECT search_results FROM movies\n WHERE search_query = ?\n AND page_number = ?\n AND strftime('%s', 'now') - timestamp < ?\"\"\"\n\n return c.execute(query, (search_query, page_number, cache_expiration)).fetchone()", "def get_query_result(self):\n\n self.construct_query()\n\n uri = \"https://uts-ws.nlm.nih.gov\"\n content_endpoint = \"/rest/search/{0}?string={1}&sabs={2}&returnIdType={3}\".format(\n self.version, self.identifier, self.source, self.returntype)\n\n self.query = {'ticket':self.AuthClient.getst()}\n\n r = requests.get(uri+content_endpoint, params=self.query)\n\n items = json.loads(r.text)\n self.jsonData = items[\"result\"]\n\n #print(self.jsonData)\n\n ##uncomment the print statment if you want the raw json output, or you can just look at the documentation :=)\n #https://documentation.uts.nlm.nih.gov/rest/concept/index.html#sample-output\n #https://documentation.uts.nlm.nih.gov/rest/source-asserted-identifiers/index.html#sample-output\n #print (json.dumps(items, indent = 4))", "def get_results(self, task_id=None):\n\n m_query = None\n if task_id:\n m_query = '<get_results task_id=\"%s\"/>' % scan_id\n else:\n m_query = '<get_results/>'\n\n return self.__manager.xml(m_query, xml_result=True)", "def getTokens(username):\n tokens = users.find({\"Username\": username})[0][\"Tokens\"]\n return tokens", "def query(self, q):\n for key in self.metadb.query(q):\n yield key, self.datadb[key]", "def execute(self):\n return RegistryResults(self.execute_votable(), self.queryurl)", "def search(bearer_token, term, location, offset = None, SEARCH_LIMIT = 3):\n #'limit': SEARCH_LIMIT,\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': None,\n 'offset':offset\n }\n return request(API_HOST, SEARCH_PATH, bearer_token, url_params=url_params)", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def get_token_by_caller_token(self, tokenId):\r\n params ={}\r\n params['TokenId'] = tokenId\r\n \r\n response = self.make_request(\"GetTokenByCaller\", params)\r\n body = response.read()\r\n if(response.status == 200):\r\n rs = ResultSet()\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs\r\n else:\r\n raise FPSResponseError(response.status, response.reason, body)", "def _get_query_results(self, query_result_id: str) -> Dict:\n url_inputs = {'redash_host': self.redash_host, 'query_result_id': query_result_id}\n results_url = REDASH_QUERY_RESULTS_ENDPOINT.format(**url_inputs)\n resp = r.get(results_url, headers=self.headers)\n return resp.json()", "def general_tendency(token):\n # Avoid SQL injection before doing requests\n # with the token and check the validity of it.\n token = MySQLdb.escape_string(token)\n if not validator_db.valid_token(token):\n return custom_response(400, responseMessage.BAD_TOKEN)\n\n return jsonify(query_db.get_query_db(mysqlRequests.GET_GLOBAL_TENDENCY,\n header=True))", "def solr_query(config, solr_host, fq, 
solr_collection_name):\n # solr_collection_name = config['solr_collection_name']\n\n getVars = {'q': '*:*',\n 'fq': fq,\n 'rows': 300000}\n\n url = f'{solr_host}{solr_collection_name}/select?'\n response = requests.get(url, params=getVars)\n return response.json()['response']['docs']", "def getUser(self, authenticationToken):\r\n pass", "def results(self):\n if not self._results:\n self.read_results()\n return self._results", "def test_get_token_supply_all_using_get(self):\n pass" ]
[ "0.6801665", "0.67875", "0.67327553", "0.62611675", "0.6151787", "0.6134605", "0.5989142", "0.596726", "0.5918041", "0.5917684", "0.5901937", "0.5862396", "0.5783096", "0.5776911", "0.57683307", "0.5756532", "0.5746983", "0.5745782", "0.571591", "0.57145137", "0.5698427", "0.5698427", "0.56116724", "0.5599927", "0.5571779", "0.55698764", "0.5563344", "0.55357367", "0.54741216", "0.5448918", "0.542579", "0.5417171", "0.54146814", "0.5363412", "0.5358864", "0.5350452", "0.5350452", "0.5334941", "0.53326833", "0.5306029", "0.5298674", "0.5280249", "0.525747", "0.5255278", "0.5251427", "0.52512974", "0.5242942", "0.52420634", "0.5233495", "0.52328366", "0.5231518", "0.52205306", "0.52137625", "0.519527", "0.5193422", "0.5189334", "0.5184316", "0.51830924", "0.51808196", "0.51789254", "0.5178268", "0.5176296", "0.51751775", "0.5167009", "0.51575106", "0.51516587", "0.5150605", "0.51396096", "0.5137692", "0.5133284", "0.51317114", "0.5130352", "0.51283866", "0.5124385", "0.51229286", "0.51174515", "0.5115394", "0.5108353", "0.5108087", "0.51074463", "0.51073205", "0.5094495", "0.5094495", "0.5094431", "0.5085897", "0.5082879", "0.50800455", "0.5080002", "0.5077268", "0.5076954", "0.5072301", "0.5069099", "0.50664955", "0.50658876", "0.5064047", "0.5056869", "0.5055379", "0.5038744", "0.50382036", "0.50360835" ]
0.7163675
0
Create and save a user with the given username, email and password.
def _create_user(self, username, email, password, **extra_fields):
    if not username:
        raise ValueError('The given username must be set')
    email = self.normalize_email(email)
    username = self.model.normalize_username(username)
    user = self.model(username=username, email=email, **extra_fields)
    user.set_password(password)
    user.save(using=self._db)
    return user
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def _create_user(self, email, password,username, **extra_fields):\r\n if not email:\r\n raise ValueError('The given email must be set')\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n email = self.normalize_email(email)\r\n user = self.model(email=email,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('Username is required.')\n if not email:\n raise ValueError('Email is required.')\n if not password:\n raise ValueError('Password is required.')\n try:\n with transaction.atomic():\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, username, **extra_fields):\n if not email:\n raise ValueError(_('Email is required.'))\n if not username:\n raise ValueError(_('Username is required.'))\n email = self.normalize_email(email)\n username = username\n user = self.model(email=email, username=username, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, username, email, password, **extra_fields):\n if not email:\n raise ValueError('The email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def signup(cls, username, first_name, last_name, email, password):\n\n hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')\n\n user = User(\n username=username,\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=hashed_pwd,\n )\n\n db.session.add(user)\n return user", "def _create_user(self, username, name,\n email, password, **extra_fields):\n if not email:\n raise ValueError('Email field is required')\n email = self.normalize_email(email)\n user = self.model(\n username=username,\n name=name,\n email=email,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, username, firstname, lastname, password, **other_fields):\n\n if not email:\n raise ValueError(_('You must provide an email address'))\n\n email = self.normalize_email(email)\n user = self.model(email=email, username=username, firstname=firstname, lastname=lastname, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, username, first_name, last_name, password):\n\n email = self.normalize_email(email)\n\n user = self.model(\n email=email,\n username=username,\n first_name=first_name,\n last_name=last_name\n )\n\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def create_user(username, email, password):\n return 
User.objects.create_user(username=username, email=email, password=password)", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None,commit=True):\n\n\n user = self.model(\n email=self.normalize_email(email),\n username = username\n )\n\n user.set_password(password)\n if commit:\n user.save(using=self._db)\n\n return user", "def _create_user(self, username, email, password):\n\t\tnow = datetime.now()\n\t\tif username is None:\n\t\t\traise ValueError('Must include username')\n\t\tif email is None:\n\t\t\traise ValueError('Must include email')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(\n\t\t\temail=self.normalize_email(email),\n\t\t\tusername=username,\n\t\t\tdate_joined=now\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(self, username, email, password, **other_fields):\n if not username or not email:\n raise ValueError(_('The email and username must be set.'))\n email = self.normalize_email(email)\n\n user = self.model(username=username, email=email, **other_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, username, full_name, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n if not username:\n raise ValueError('The given username must be set')\n if not full_name:\n raise ValueError('The given full name must be set')\n\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(\n email=email, username=username, full_name=full_name, **extra_fields\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None):\n if username is None:\n raise TypeError('Users must have a username.')\n\n if email is None:\n raise TypeError('Users must have an email address.')\n\n user = self.model(username=username, email=self.normalize_email(email))\n user.set_password(password)\n user.save()\n\n return user", "def create_user(self, username=\"foo\", email=\"foo@foo.com\", pwd=\"password\"):\n with app.app_context():\n user = User(username=username,\n email=email,\n pwd=bcrypt.generate_password_hash(pwd))\n db.session.add(user)\n db.session.commit()", "def create_user(self, username, email, password=None):\n\n if not username:\n raise ValueError('Users must have an username')\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n if not username:\n raise ValueError('Users must have a username')\n \n user = self.model(email = self.normalize_email(email),\n username = username)\n \n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user_service(username: str, email: str, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user = User(username=username, email=email, password=hashed_password)\n 
db.session.add(user)\n db.session.commit()", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user_Api(self,password,username, **extra_fields):\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n user = self.model(email=username,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def create_user(self, username, email: str = None, password: str = None, **kwargs):\n return self._create_user(username, email=email, password=password, **kwargs)", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('The given email must be set')\n if not username:\n raise ValueError('The given username must be set')\n \n user = self.model(email=self.normalize_email(email), username=username)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, first_name, last_name, password, **extra_fields):\n if not email:\n raise ValueError(_('Email Address is required'))\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n **extra_fields\n )\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, password, email, name):\n\n duplicate_check = User.query.filter_by(username=username).first()\n if duplicate_check is not None:\n return\n user = User(username=username, password=password, email=email, name=name)\n db.session.add(user)\n db.session.commit()", "def create_user(self, username, email, password=None):\n if not username:\n raise ValueError('Users must have an username')\n\n if not email:\n raise ValueError('Users must have an email address')\n\n if not password:\n raise ValueError('Users must have an password')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n print(\"create user\")\n return user", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def register(cls, username, email, password):\n\n hashed_password = bcrypt.generate_password_hash(password).decode(\"UTF-8\")\n user = User(username=username, email=email, password=hashed_password)\n db.session.add(user)\n\n return user", "def create_user(self, username, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(cls, name, username, email, password):\n new_user = cls(name=name,\n username=username,\n email=email\n )\n new_user.password = 
bcrypt.generate_password_hash(\n password).decode('utf-8')\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def create_user(self, username=None, email=None, password=None):\n\t\treturn self._create_user(username, email, password)", "def create_user(self, username, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(\n email: str = Form(...),\n first_name: str = Form(...),\n last_name: str = Form(...),\n password: str = Form(...),\n) -> Dict:\n # Try to retrieve the user in the db\n user_exists = models.User.objects(email=email).first()\n if user_exists:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT, detail=f\"The username already exists\"\n )\n return user.create(email, first_name, last_name, password)", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def _create_user(self, email, password, **extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\t\temail = self.normalize_email(email)\n\t\tuser = self.model(email=email, **extra_fields)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def create_user(self, username, password=None, **extra_fields):\n user = self.model(username=username, **extra_fields)\n user.set_password(password)\n user.save()\n\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError(\"Vous devez renseigner un email!\")\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def Create_user(self, email, name, password):\n\n #validating user inputs\n if not email:\n raise ValueError('Users must have email address')\n \n #normalize email (converting all to lowercase)\n email = self.normalize_email(email)\n #create a new user object\n user = self.model(email= email, name=name)\n\n #setting the password\n user.set_password(password)\n user.save(using = self._db) #using the same model created for the profile\n\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Please provide your email address'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def 
_create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n first_name = first_name\n last_name = self.last_name\n user = self.model(first_name, last_name,email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password='test', **kwargs):\n user = get_user_model().objects.create(email=email, **kwargs)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=self.normalize_email(email),\n username=username,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n \n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(username=username,\n email=self.normalize_email(email),\n date_joined=now\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n username = validated_data.get('username')\n email = validated_data.get('email')\n password = validated_data.get('password')\n first_name = validated_data.get('first_name', '')\n last_name = validated_data.get('last_name', '')\n return User.objects.create_user(username, email, password, first_name=first_name,\n last_name=last_name)", "def create_user(self, username, email=None, password=None, first_name=None, last_name=None, **extra_fields):\n if not username:\n raise ValueError('Users must have a username')\n\n full_name = '%s%s%s' % (\n first_name if first_name is not None else '',\n '' if (first_name is None or last_name is None) else ' ',\n last_name if last_name is not None else ''\n )\n\n user = self.model(\n username=username,\n email=UserManager.normalize_email(email),\n full_name=full_name,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email=None, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, username=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self,email,password,**extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\n\t\ttry:\n\t\t\twith transaction.atomic():\n\t\t\t\tuser = self.model(email=email,**extra_fields)\n\t\t\t\tuser.set_password(password)\n\t\t\t\tuser.save(using=self._db)\n\t\t\t\treturn user\n\t\texcept:\n\t\t\traise", "def create_user(email='user@example.com', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = 
self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The Email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, username, first_name, last_name, email, password, is_staff, is_superuser,\n **extra_fields):\n\n now = timezone.now()\n\n if not email:\n raise ValueError('Email is Required!')\n if not username:\n raise ValueError('Username is Required!')\n\n email = self.normalize_email(email)\n user = self.model(username=username, first_name=first_name, last_name=last_name, email=email,\n is_staff=is_staff, is_active=True, is_superuser=is_superuser, last_login=now,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n last_login=now,\n date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n 
user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, email, password=None):\n # The Username and Email cannot be Null\n if username is None:\n raise ValueError(\"User Must Have a Username\")\n if email is None:\n raise ValueError(\"User Must Have an Email\")\n\n # Removing unnecessary things from email(example:spaces)\n email = self.normalize_email(email)\n\n # Creating a user instance\n user = self.model(username=username, email=email)\n\n # Set Password Method automatically hashes the password and saves.\n # It increases Security\n user.set_password(password)\n\n # Save the current user instance to the database\n user.save(using=self._db)\n\n return user", "def create_user(self, email, username, gender, first_name, last_name, password=None):\n if not email:\n raise ValueError(\"Users must have Email\")\n if not username:\n raise ValueError(\"Users must have username\")\n\n user = self.model(\n\n # lowercase the domain portion of the email address\n email = self.normalize_email(email),\n username = username,\n gender = gender,\n first_name = first_name,\n last_name = last_name \n )\n\n #This function will hash given password from NewUser\n user.set_password(password)\n\n user.save(using=self._db)\n\n return user", "def _create_user(self, email: str, password: str, **extra_fields) -> 'User':\n if not email:\n raise ValueError(\"The given email must be set.\")\n email = self.normalize_email(email).lower()\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(cls, username, email, password, admin=False):\n try:\n cls.create(\n username = username,\n email = email,\n password = generate_password_hash(password),\n is_admin = True)\n except IntegrityError:\n raise ValueError(\"User already exists\")", "def signup(cls, username, password):\n\n hashed = bcrypt.generate_password_hash(password).decode(\"utf8\")\n user = User(\n username=username,\n password=hashed\n )\n db.session.add(user)\n\n return user", "def create_user(self, email, name, password):\n\n if not email:\n raise ValueError(\"User must have an email address\")\n email = self.normalize_email(email)\n user = self.model(email=email)\n user.set_password(password)##encripts the password into HASH\n user.save(using=self._db)\n\n return user", "def create_user(email, password):\n try:\n User(email=email, password=password)\n except IntegrityError:\n print('Error: Duplicate email address')", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n 
user.set_password(password)\n user.save()\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, username, email, password,\n is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n user = self.model(username=username, email=email,\n is_staff=is_staff,\n is_superuser=is_superuser,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n username = validated_data.pop('username')\n email = validated_data.pop('email')\n password = validated_data.pop('password')\n user = User.objects.create_user(\n username, email, password, **validated_data)\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n\n return user", "def _create_user(self, email, password, **extra_fields):\n validate_email(email)\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save()\n return user", "def _create_user(self, **fields):\n email = fields.pop('email')\n password = fields.get('password1')\n if not email:\n raise ValueError(\"Email address is required\")\n email = self.normalize_email(email)\n user = self.model(email=email, **fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password):\n\n user = User(email=email, password=password)\n \n db.session.add(user)\n db.session.commit()\n\n return user", "def _create_user(self, email, password, **extra_fields):\n\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, email, username, password=None):\n if not email:\n raise ValueError(\"User must have an email address.\")\n\n email = self.normalize_email(email)\n user = self.model(email=email, username=username)\n\n if len(password) > settings.MAX_PASSWORD_LENGTH:\n truncated_password = password[: settings.MAX_PASSWORD_LENGTH]\n else:\n truncated_password = password\n\n user.set_password(truncated_password)\n user.save(using=self._db)\n\n return user", "def create_user(self, email: str, password: str, **extra):\n try:\n user = self.model(email=self.normalize_email(email),\n **extra)\n user.set_password(password)\n user.save(using=self._db)\n except IntegrityError as Ex:\n raise 
IntegrityError(\"Duplicate\")\n return user", "def _create_user(self, email, password=None, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self,email,password=None,**extra_fields):\n if not email:\n raise ValueError(\"Please provide an email\")\n user = self.model(email=self.normalize_email(email),**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(\n email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n is_activated=False,\n last_login=now,\n date_joined=now,\n **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(self, username, password, **extra_kwargs):\n if not username:\n raise ValueError(\"User must have an username\")\n\n user = self.model(username=username, **extra_kwargs)\n user.set_password(password)\n\n user.save(using=self._db)\n\n return user", "def create(self, validated_data):\n user = get_user_model().objects.create(\n username=validated_data['username'],\n )\n user.set_password(validated_data['password'])\n user.save()\n return user", "def save(self, request, validated_data):\n # Create user\n user = User.objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n username=validated_data['username'].encode('utf-8')\n )\n\n return user", "def register(cls, first_name, last_name, email, username, password):\n\n hashed = Bcrypt.generate_password_hash(cls, password, 14)\n # turn bytestring into normal (unicode utf8) string\n hashed_utf8 = hashed.decode(\"utf8\")\n\n # return instance of user w/username and hashed pwd\n user = User(first_name=first_name, last_name=last_name, email=email, username=username, password=hashed_utf8)\n db.session.add(user)\n return user", "def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def _create_user(self, email, username, password, gender=2, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(email=email, username=username, gender=gender, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user" ]
[ "0.859775", "0.8441594", "0.842702", "0.8424334", "0.8394744", "0.8389154", "0.834259", "0.8335656", "0.8334432", "0.83239305", "0.82892966", "0.82822585", "0.8275777", "0.8270387", "0.82692033", "0.8165085", "0.8160581", "0.8158989", "0.8143792", "0.8138259", "0.8128023", "0.81190044", "0.81117904", "0.8099799", "0.8099458", "0.8096423", "0.8093528", "0.8078844", "0.8072498", "0.80713415", "0.80611914", "0.8055701", "0.80488", "0.8048402", "0.8041929", "0.8041929", "0.8033658", "0.8022877", "0.8020259", "0.8020259", "0.80201817", "0.7999305", "0.7995324", "0.7993038", "0.7988025", "0.79859793", "0.79840696", "0.79835033", "0.7979401", "0.7969457", "0.7968905", "0.79674125", "0.7964187", "0.79539484", "0.79415137", "0.7940779", "0.7940711", "0.7940711", "0.7940711", "0.79384935", "0.7937817", "0.79313564", "0.7927704", "0.7927704", "0.7927704", "0.7927704", "0.7927704", "0.7927704", "0.7927704", "0.7927704", "0.79116434", "0.79085", "0.7907469", "0.7906484", "0.79004854", "0.78994346", "0.7897498", "0.78951913", "0.78951913", "0.78951913", "0.78949654", "0.78941256", "0.7892799", "0.78912336", "0.7889332", "0.7887717", "0.78834814", "0.7880514", "0.7880359", "0.78707343", "0.78642136", "0.7864039", "0.78621113", "0.7861874", "0.7860384", "0.7859323", "0.7857577", "0.7855367", "0.78480893", "0.78397804" ]
0.84072465
4
A backend can raise `PermissionDenied` to short-circuit permission checking.
def _user_has_perm(user, perm, obj):
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_perm'):
            continue
        try:
            if backend.has_perm(user, perm, obj):
                return True
        except PermissionDenied:
            return False
    return False
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def permission_required_or_403(perm, *args, **kwargs):\n kwargs['return_403'] = True\n return permission_required(perm, *args, **kwargs)", "def DeniedPermissions(self) -> _n_6_t_0:", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def forbidden():\n return HttpError(403)", "def can(self, unused_perm):\n return False", "def get_everyone_denied(self):", "def permission_denied(request):\n\treturn render(request, '403.html', None)", "def forbidden(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseForbidden, *args, **kwargs)", "def test_requires_privilege_no_such(self):\n @requires_privilege('bomboozle', domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n request = HttpRequest()\n request.role = requestor_role\n with self.assertRaises(PermissionDenied):\n view(request)", "def get_authenticated_denied(self):", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def permission_required(permission):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not g.current_user.can(permission):\n return forbidden('Not permitted')\n return f(*args, **kwargs)\n return decorated_function\n return decorator", "def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)", "def permissions_required(permissions):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.can(permissions):\n abort(403)\n return f(*args,**kwargs)\n return decorated_function\n\n return decorator", "def test_no_permission(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n req.user = Mock()\n req.user.has_perm.return_value = False\n\n res = self.view(\n req,\n decorator=self.actions(\n self.mock_model, [\"doit\"], permission=\"do_things\")\n )\n\n self.assertEqual(res.status_code, 403)\n req.user.has_perm.assert_called_with(\"do_things\")", "def require_project_administrator(project):\n if not test_project_administrator(project):\n raise cherrypy.HTTPError(403)", "def test_no_permission(client, mocker):\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=False\n )\n resp = client.post(reverse(\"order-fulfillment\"), data={})\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN", "def cant(user, action):\n\n return not can(user, action)", "def permission_request_processor(page, request):\n if not has_permission_to_view(page, request.user):\n raise PermissionDenied", "def not_allowed(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseNotAllowed, *args, **kwargs)", "def permission_denied(request, exception):\n return defaults.permission_denied(request, exception, template_name=get_template_name(request, \"403.html\"))", "def test_no_permissions(self):\n\n login(self.client)\n\n client = create_client('test')\n client.write_access = False\n client.save()\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': 3})\n self.assertEqual(response.status_code, 200)\n 
self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def resource_forbidden(exc, request):\r\n request.response_status = \"403 Forbidden\"\r\n return {'message': str(exc)}", "def app_permission_denied(self, request, message=None):\n if not request.successful_authenticator and not message:\n raise exceptions.NotAuthenticated()\n if message:\n raise exceptions.PermissionDenied(detail=message)\n raise exceptions.PermissionDenied(detail=message)", "def access_forbidden(e):\n return render_template(\"error/403.html\"), 403", "def forbidden(e):\n return render_template(\"errors/403.html\"), 403", "async def permission_valid_check(cls):\n pass", "def autz_required(permission, context=None):\n def decorator(func):\n\n @wraps(func)\n async def wrapper(*args):\n request = (args[-1].request\n if isinstance(args[-1], web.View)\n else args[-1])\n\n if await autz.permit(request, permission, context):\n return await func(*args)\n\n raise web.HTTPForbidden()\n\n return wrapper\n\n return decorator", "def test_authorization_required(self, method):\n self.user.user_permissions.clear()\n\n response = getattr(self.client, method)(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 403", "def test_forbidden(self):\n self._error_test(fitbit_exceptions.HTTPForbidden)", "def test_wrong_permission(self):\n with self.assertRaises(InvalidPermissionStringError):\n client_has_permission('test', 'asdf')", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def render_or_deny(fn):\n @wraps(fn)\n def _wrapped(obj, options, context, *args, **kwargs):\n if not obj.access_check(options, context['request'].user):\n return PermissionDeniedWidget(\n obj.uid,\n obj.classname()\n ).render(options, context)\n else:\n return fn(obj, options, context)\n return _wrapped", "def permits(identity, obj, permission):\n return False", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_requires_privilege_denied(self):\n\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n\n request = HttpRequest()\n request.role = requestor_role.instantiate({})\n with self.assertRaises(PermissionDenied):\n view(request)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def forbidden(self):\n self.flash(self._(\"You don't have the correct permissions to access this page.\"), category=\"error\")\n # TODO: maybe check barcamp and permissions for the barcamp homepage and redirect there instead\n # TODO: maybe create a remember decorator which remember the last page in the session which is safe to redirect to.\n # the forbidden handler should delete it though\n return redirect(self.url_for(\"index\"))", "def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return abort(403)\r\n return decorated_function", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_requires_privilege_no_current_role(self):\n 
@requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def has_permission(self):\n return super().has_permission()", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def cant(self, permissions: Union[str, List]) -> bool:", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def __acl__(self):\n yield 'Allow', 'system.Everyone', 'none'\n yield security.DENY_ALL", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def no_reason(message, db):\n message.reply(Strings['GRANT_EXAMPLE'].format(db))", "def handle_no_permission(self):\n if self.is_ajax():\n return JsonResponse({'error': 'unauthorized'}, status=401)\n return super().handle_no_permission()", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def test_add_flow_requests_forbidden(self):\n # The dispatcher in test data doesn't have the flow_request:write authorization\n res = self._add_flow_request(client_name=POWERLESS_NAME)\n self.assertEqual(res.status_code, 403)", "def permit_required(self):\n return \"permission\" in self.description.lower()", "def forbidden(e):\n\n return render_template('errors/403.html'), 500", "def _enforce(self, req, action):\n try:\n self.policy.enforce(req.context, action, {})\n except exception.Forbidden:\n raise HTTPForbidden()", "def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)", "def test_permission(self):\n response = self._get()\n 
self.assertEqual(response.status_code, 200)", "def permission_denied(request, exception=None, template_name='403.html'):\n #pylint:disable=unused-argument\n if exception is None:\n exception = PermissionDenied()\n response = PermissionDeniedView(exception).dispatch(request)\n response.render()\n return response", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()", "def get(self, *args, **kwargs):\n self.write(\"Not allowed\")\n self.finish()", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def requires_admin(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not users.is_current_user_admin():\n try:\n self.DenyAccess()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n else:\n return f(self, *args, **kwargs)\n return wrapper", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def require_worker_creator(worker):\n if is_server_administrator():\n return\n if is_worker_creator(worker):\n return\n raise cherrypy.HTTPError(403)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def write_authorize(cls, user, obj):\n if not obj.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS", "def perform_authorization(self):\n\n for permission in self.permissions:\n if not permission.has_permission():\n if request.user:\n raise errors.PermissionDenied()\n else:\n raise errors.NotAuthenticated()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n identity = get_jwt_identity()\n if identity['role'] != 'admin':\n return jsonify({'message': 'Permission denied'}), 403\n else:\n return fn(*args, **kwargs)\n\n return wrapper", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = 
get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def privileged(f):\n @wraps(f)\n def wrapper(self, msg, *args, **kwargs):\n if not Guard.allow_sudo(msg):\n return\n return f(self, msg, *args, **kwargs)\n return wrapper", "def RequestedPermissions(self) -> _n_6_t_0:", "def write_authorize_examinercommon(cls, user, obj):\n if obj.delivered_by != None:\n raise PermissionDenied()", "def page_forbidden(e):\n return render_template(\"403.html\", page_title=403)", "def cmd_crash_private(self, argument):\n if self.is_admin:\n raise IndexError()\n else:\n self.logger.warning(\"User %s tried to use '%s' without being admin\" % (self.nick, \"crash\"))", "def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)", "def superuser_only(view_func):\n def _inner(request, *args, **kwargs):\n if not request.user.is_superuser:\n raise PermissionDenied\n return view_func(request, *args, **kwargs)\n return _inner", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))" ]
[ "0.7416006", "0.71565545", "0.6592668", "0.6589614", "0.6547855", "0.65448385", "0.64365923", "0.6416801", "0.6404717", "0.6373068", "0.63710433", "0.6339145", "0.6339145", "0.6339145", "0.6339145", "0.6312995", "0.6312995", "0.6291042", "0.62679636", "0.6225085", "0.6165789", "0.61628765", "0.61430514", "0.6120078", "0.6100796", "0.6098501", "0.6097262", "0.6096876", "0.60750955", "0.6066793", "0.6063671", "0.6050653", "0.6044062", "0.6026567", "0.5996633", "0.5986633", "0.59702116", "0.5969543", "0.59492606", "0.59486026", "0.5941058", "0.5940235", "0.5938514", "0.5938514", "0.59366727", "0.5926023", "0.59224916", "0.59030837", "0.58984965", "0.58954525", "0.5869739", "0.5857334", "0.58530295", "0.58469987", "0.58437186", "0.5841176", "0.5841176", "0.58338994", "0.5831535", "0.5803895", "0.58005667", "0.5788698", "0.57769895", "0.57715344", "0.57639754", "0.5763813", "0.57603896", "0.5759859", "0.57564765", "0.57542634", "0.57542634", "0.5752521", "0.5745783", "0.57453793", "0.5736883", "0.5736566", "0.5717292", "0.5717292", "0.57123697", "0.570844", "0.5703542", "0.5694093", "0.56930715", "0.56930715", "0.56764525", "0.56684446", "0.5666713", "0.5660019", "0.56481004", "0.563946", "0.56358683", "0.56332725", "0.5633127", "0.5632139", "0.56169415", "0.56158245", "0.56152797", "0.5608011", "0.56060976", "0.5597672", "0.55964696" ]
0.0
-1
A backend can raise `PermissionDenied` to short-circuit permission checking.
def _user_has_module_perms(user, app_label):
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_module_perms'):
            continue
        try:
            if backend.has_module_perms(user, app_label):
                return True
        except PermissionDenied:
            return False
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def permission_required_or_403(perm, *args, **kwargs):\n kwargs['return_403'] = True\n return permission_required(perm, *args, **kwargs)", "def DeniedPermissions(self) -> _n_6_t_0:", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def forbidden():\n return HttpError(403)", "def can(self, unused_perm):\n return False", "def get_everyone_denied(self):", "def permission_denied(request):\n\treturn render(request, '403.html', None)", "def forbidden(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseForbidden, *args, **kwargs)", "def test_requires_privilege_no_such(self):\n @requires_privilege('bomboozle', domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n request = HttpRequest()\n request.role = requestor_role\n with self.assertRaises(PermissionDenied):\n view(request)", "def get_authenticated_denied(self):", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def test_no_permission(self):\n self.assertStatusCode(self.url, 403)", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def permission_required(permission):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not g.current_user.can(permission):\n return forbidden('Not permitted')\n return f(*args, **kwargs)\n return decorated_function\n return decorator", "def require_server_administrator():\n if not test_server_administrator():\n raise cherrypy.HTTPError(403)", "def permissions_required(permissions):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.can(permissions):\n abort(403)\n return f(*args,**kwargs)\n return decorated_function\n\n return decorator", "def test_no_permission(self):\n req = self.req(\"post\", \"/the/url\", data={\"action-doit\": \"3\"})\n req.user = Mock()\n req.user.has_perm.return_value = False\n\n res = self.view(\n req,\n decorator=self.actions(\n self.mock_model, [\"doit\"], permission=\"do_things\")\n )\n\n self.assertEqual(res.status_code, 403)\n req.user.has_perm.assert_called_with(\"do_things\")", "def require_project_administrator(project):\n if not test_project_administrator(project):\n raise cherrypy.HTTPError(403)", "def test_no_permission(client, mocker):\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=False\n )\n resp = client.post(reverse(\"order-fulfillment\"), data={})\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN", "def cant(user, action):\n\n return not can(user, action)", "def permission_request_processor(page, request):\n if not has_permission_to_view(page, request.user):\n raise PermissionDenied", "def not_allowed(*args, **kwargs):\n return HttpResponseBehaviour(HttpResponseNotAllowed, *args, **kwargs)", "def test_no_permissions(self):\n\n login(self.client)\n\n client = create_client('test')\n client.write_access = False\n client.save()\n response = self.client.post('/backend/u/conc/', {'client': 'test', 'id': 3})\n self.assertEqual(response.status_code, 200)\n self.assertEqual('Permission not granted', response.content.decode('utf-8'))", "def permission_denied(request, exception):\n return defaults.permission_denied(request, exception, 
template_name=get_template_name(request, \"403.html\"))", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def resource_forbidden(exc, request):\r\n request.response_status = \"403 Forbidden\"\r\n return {'message': str(exc)}", "def app_permission_denied(self, request, message=None):\n if not request.successful_authenticator and not message:\n raise exceptions.NotAuthenticated()\n if message:\n raise exceptions.PermissionDenied(detail=message)\n raise exceptions.PermissionDenied(detail=message)", "def access_forbidden(e):\n return render_template(\"error/403.html\"), 403", "def forbidden(e):\n return render_template(\"errors/403.html\"), 403", "async def permission_valid_check(cls):\n pass", "def autz_required(permission, context=None):\n def decorator(func):\n\n @wraps(func)\n async def wrapper(*args):\n request = (args[-1].request\n if isinstance(args[-1], web.View)\n else args[-1])\n\n if await autz.permit(request, permission, context):\n return await func(*args)\n\n raise web.HTTPForbidden()\n\n return wrapper\n\n return decorator", "def test_authorization_required(self, method):\n self.user.user_permissions.clear()\n\n response = getattr(self.client, method)(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 403", "def test_forbidden(self):\n self._error_test(fitbit_exceptions.HTTPForbidden)", "def test_wrong_permission(self):\n with self.assertRaises(InvalidPermissionStringError):\n client_has_permission('test', 'asdf')", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def permits(identity, obj, permission):\n return False", "def render_or_deny(fn):\n @wraps(fn)\n def _wrapped(obj, options, context, *args, **kwargs):\n if not obj.access_check(options, context['request'].user):\n return PermissionDeniedWidget(\n obj.uid,\n obj.classname()\n ).render(options, context)\n else:\n return fn(obj, options, context)\n return _wrapped", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_requires_privilege_denied(self):\n\n @requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n requestor_role = arbitrary.role()\n\n request = HttpRequest()\n request.role = requestor_role.instantiate({})\n with self.assertRaises(PermissionDenied):\n view(request)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def forbidden(self):\n self.flash(self._(\"You don't have the correct permissions to access this page.\"), category=\"error\")\n # TODO: maybe check barcamp and permissions for the barcamp homepage and redirect there instead\n # TODO: maybe create a remember decorator which remember the last page in the session which is safe to redirect to.\n # the forbidden handler should delete it though\n return redirect(self.url_for(\"index\"))", "def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return abort(403)\r\n return decorated_function", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def test_requires_privilege_no_current_role(self):\n 
@requires_privilege(self.zazzle_privilege.slug, domain='zizzle')\n def view(request, *args, **kwargs):\n pass\n\n request = HttpRequest()\n with self.assertRaises(PermissionDenied):\n view(request)", "def has_permission(self):\n return super().has_permission()", "def can_edit_or_403(self, user):\n if user.id != self.game_master.id:\n raise PermissionDenied\n return True", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def cant(self, permissions: Union[str, List]) -> bool:", "def assert_response_resource_not_accessible(self, response):\n self.assertEqual(response.status_code, 403)\n self.assertEqual(\n response.json(),\n {\"detail\": \"You do not have permission to perform this action.\"},\n )", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def __acl__(self):\n yield 'Allow', 'system.Everyone', 'none'\n yield security.DENY_ALL", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def no_reason(message, db):\n message.reply(Strings['GRANT_EXAMPLE'].format(db))", "def handle_no_permission(self):\n if self.is_ajax():\n return JsonResponse({'error': 'unauthorized'}, status=401)\n return super().handle_no_permission()", "def can_edit_or_403(self, user):\n if self.get_permission_level(user) < self.OWNER_PERMISSION:\n raise PermissionDenied\n return True", "def test_add_flow_requests_forbidden(self):\n # The dispatcher in test data doesn't have the flow_request:write authorization\n res = self._add_flow_request(client_name=POWERLESS_NAME)\n self.assertEqual(res.status_code, 403)", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def permit_required(self):\n return \"permission\" in self.description.lower()", "def forbidden(e):\n\n return render_template('errors/403.html'), 500", "def _enforce(self, req, action):\n try:\n self.policy.enforce(req.context, action, {})\n except exception.Forbidden:\n raise HTTPForbidden()", "def test_permission(self):\n response = self._get()\n self.assertEqual(response.status_code, 200)", "def test_permission(self):\n response = self._get()\n 
self.assertEqual(response.status_code, 200)", "def permission_denied(request, exception=None, template_name='403.html'):\n #pylint:disable=unused-argument\n if exception is None:\n exception = PermissionDenied()\n response = PermissionDeniedView(exception).dispatch(request)\n response.render()\n return response", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()", "def get(self, *args, **kwargs):\n self.write(\"Not allowed\")\n self.finish()", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def test_no_permission(self):\n self.user.user_permissions.remove(*self.permissions)\n response = self._get()\n self.assertEqual(response.status_code, 302)", "def requires_admin(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not users.is_current_user_admin():\n try:\n self.DenyAccess()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n else:\n return f(self, *args, **kwargs)\n return wrapper", "def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", True)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def require_worker_creator(worker):\n if is_server_administrator():\n return\n if is_worker_creator(worker):\n return\n raise cherrypy.HTTPError(403)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def write_authorize(cls, user, obj):\n if not obj.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS", "def perform_authorization(self):\n\n for permission in self.permissions:\n if not permission.has_permission():\n if request.user:\n raise errors.PermissionDenied()\n else:\n raise errors.NotAuthenticated()", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n identity = get_jwt_identity()\n if identity['role'] != 'admin':\n return jsonify({'message': 'Permission denied'}), 403\n else:\n return fn(*args, **kwargs)\n\n return wrapper", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = 
get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def RequestedPermissions(self) -> _n_6_t_0:", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def privileged(f):\n @wraps(f)\n def wrapper(self, msg, *args, **kwargs):\n if not Guard.allow_sudo(msg):\n return\n return f(self, msg, *args, **kwargs)\n return wrapper", "def write_authorize_examinercommon(cls, user, obj):\n if obj.delivered_by != None:\n raise PermissionDenied()", "def page_forbidden(e):\n return render_template(\"403.html\", page_title=403)", "def cmd_crash_private(self, argument):\n if self.is_admin:\n raise IndexError()\n else:\n self.logger.warning(\"User %s tried to use '%s' without being admin\" % (self.nick, \"crash\"))", "def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)", "def superuser_only(view_func):\n def _inner(request, *args, **kwargs):\n if not request.user.is_superuser:\n raise PermissionDenied\n return view_func(request, *args, **kwargs)\n return _inner", "def test_cannot_delete_usage(self):\n p = Permission.objects.get(name='Can delete usage')\n self.user.user_permissions.add(p)\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.delete(reverse('api_v1:usage-detail', kwargs={'pk': 1}),\n follow=True)\n self.assertEqual(response.status_code, 405)\n self.assertIn('not allowed', str(response.content))" ]
[ "0.74162996", "0.71563745", "0.6592866", "0.6589361", "0.6548821", "0.6544809", "0.6436187", "0.6416667", "0.64040965", "0.6372819", "0.6371053", "0.6338895", "0.6338895", "0.6338895", "0.6338895", "0.63118577", "0.63118577", "0.629086", "0.6268523", "0.62251085", "0.61657155", "0.6163138", "0.614334", "0.6119782", "0.6099904", "0.6098559", "0.6097691", "0.6096374", "0.6075324", "0.60665417", "0.6062522", "0.6049966", "0.6043333", "0.60261667", "0.599672", "0.598682", "0.5969199", "0.5968936", "0.5949621", "0.5948824", "0.5940884", "0.59408677", "0.593877", "0.593877", "0.5935932", "0.5925741", "0.5922318", "0.59029466", "0.58986276", "0.5895224", "0.5870905", "0.585775", "0.5853242", "0.58472455", "0.5842491", "0.5841268", "0.5841268", "0.5833047", "0.5832195", "0.5804587", "0.5800715", "0.5789047", "0.57767797", "0.5772269", "0.5763951", "0.5763866", "0.5761467", "0.5759046", "0.57565606", "0.5754471", "0.5754471", "0.5751476", "0.5745627", "0.5744512", "0.5736889", "0.5736667", "0.5717103", "0.5717103", "0.57118857", "0.57083184", "0.5702648", "0.5694333", "0.5692948", "0.5692948", "0.5675872", "0.5669413", "0.56668186", "0.56597245", "0.56477", "0.5638919", "0.56355786", "0.56331885", "0.56330734", "0.5632783", "0.56162107", "0.5615013", "0.56144446", "0.56084365", "0.5605737", "0.5597335", "0.5596511" ]
0.0
-1
Return the first_name plus the last_name, with a space in between.
def get_full_name(self):
    full_name = '%s %s' % (self.first_name, self.last_name)
    return full_name.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def full_name(first_name, last_name):\n return first_name + \" \" + last_name", "def full_name(first_name, last_name):\n\t\n\treturn first_name + \" \" + last_name", "def get_formated_name(first_name,last_name):\n\tfull_name = first_name + '' + last_name\n\treturn full_name.title()", "def get_full_name(self):\n return self.first_name+\" \"+self.last_name", "def get_full_name(self):\n return self.first_name + ' ' + self.last_name", "def get_formatted_name(first_name,last_name):\n full_name= first_name + \" \"+last_name\n return full_name.title()", "def get_formatted_name(first_name,last_name):\n\tfull_name=first_name+ ' ' +last_name\n\treturn full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def parse_name(first_name, last_name):\n\n return first_name + \" \" + last_name", "def get_full_name(self):\n return self.last_name + self.first_name", "def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)", "def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)", "def full_name(self,first_name):\n full_name = self.first_name + ' ' + self.last_name\n return full_name", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def get_full_name(self):\n return \"{} {}\".format(self.first_name, self.last_name)", "def full_name(self):\n return self.first_name + \" \" + self.last_name", "def first_last_name(obj):\n return '%s %s' % (obj.first_name, obj.last_name)", "def get_formatted_name(first, last):\n\tfull_name = first + ' ' + last\n\treturn full_name.title()", "def get_formatted_name(first_name, last_name):\n # Describe the function\n # The names are joined into full name\n formatted_name = first_name + ' ' + last_name\n # return the value, don't do anything with it yet\n return formatted_name.title()", "def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()", "def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def get_formatted_name(first_name, last_name): \r\n full_name = f\"{first_name} {last_name}\"\r\n return full_name.title()", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def get_formatted_name(first, last):\n full_name = first + ' ' + last\n return full_name.title()", "def get_full_name(self):\n return \"%s %s\" % (self._first_name, self._last_name)", "def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n return '{}, {}'.format(self.last_name, self.first_name)", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + 
self.last", "def get_full_name(self):\n full_name = '{} {}'.format(self.first_name, self.last_name)\n return full_name.strip()", "def get_formatted_name(first_name, last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def fullname(self):\n parts = []\n self.lastname and parts.append(self.lastname)\n self.firstname and parts.append(self.firstname)\n len(parts) == 0 and parts.append(self.userid)\n\n return ' '.join(parts)", "def formatted_name(first_name, last_name, middle_name = ''):\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name, middle_name=''):\n if middle_name:\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n else:\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_formatted_name(first_name, last_name, middle_name=''):\n if middle_name:\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n else:\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_full_name(self):\n return self.name+self.last_name", "def get_formatted_name(first_name,last_name):\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name, middle_name = ''):\n if middle_name:\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n else:\n full_name = first_name + ' ' + last_name\n return full_name.title()", "def get_full_name(self):\n return f'{self.first_name} {self.last_name}'", "def get_formatted_name(self):\n\n return '%s %s' % (self.last_name, self.first_initial)", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def get_full_name(self) -> str:\n return f\"{self.first_name} {self.last_name}\"", "def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def get_formatted_name(first_name, middle_name, last_name):\n full_name = f\"{first_name} {middle_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name, middle_name = ''):\n if middle_name:\n full_name = f\"{first_name} {middle_name} {last_name}\"\n else:\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()", "def get_formatted_name(first_name, last_name, middle_name=''):\r\n if middle_name:\r\n full_name = f\"{first_name} {middle_name} {last_name}\"\r\n else:\r\n full_name = f\"{first_name} {last_name}\"\r\n return full_name.title()", "def format_name(first_name, last_name, middle_name=\"\"):\r\n if middle_name:\r\n full_name = first_name + \" '\" + middle_name + \"' \" + last_name\r\n else:\r\n full_name = first_name + \" \" + last_name\r\n return full_name.title()", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def get_formatted_name(first, last):\n full_name = f\"{first} {last}\"\n return full_name.title()", "def get_formatted_name(first, last):\n full_name = f'{first} {last}'\n return full_name.title()", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def 
full_name(self):\n return f\"{self.first_name} {self.last_name}\"", "def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)", "def full_name(self) -> str:\r\n\t\tname = f'{self.last_name} {self.first_name}'\r\n\t\tif self.middle_name:\r\n\t\t\tname += ' ' + self.middle_name\r\n\t\treturn name", "def get_full_name(self):\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def first_name_and_initial(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name[0])", "def get_short_name(self):\n last_name = self.last_name\n first_name = self.first_name\n if (not (last_name and not last_name.isspace())):\n \"\"\" If last name is empty or none then return first name\"\"\"\n return first_name\n else:\n return last_name", "def get_formatted_name(first,last,middle = ''):\n if middle:\n full_name = first + ' ' + middle + ' ' + last\n else:\n full_name = first + ' ' + last\n return full_name.title()", "def make_full_name(first_name, last_name, middle_initial=\"\"):\n if middle_initial:\n return f\"{first_name} {middle_initial[0:1]}. {last_name}\"\n else:\n return f\"{first_name} {last_name}\"", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def get_full_name(self):\n return u'%s %s' % (self.first_name, self.last_name)", "def combine_name(self):\n if self.first_name.isalpha() and self.last_name.isalpha():\n username = self.first_name + \" \" + self.last_name\n return username\n return 'Names must be alphabets'", "def _get_full_name(self):\n if self.middle_name:\n return u'%s %s %s' % (self.first_name, self.middle_name,\n self.last_name)\n else:\n return u'%s %s' % (self.first_name, self.last_name)", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username" ]
[ "0.83421856", "0.82273436", "0.80990887", "0.8067676", "0.80525744", "0.8043721", "0.7993416", "0.7992513", "0.7992513", "0.7992513", "0.7978309", "0.7963202", "0.7957057", "0.79399455", "0.7936728", "0.79338795", "0.79154027", "0.7901051", "0.7892942", "0.787005", "0.7855431", "0.78537744", "0.78484356", "0.7836986", "0.78346753", "0.78346753", "0.7834202", "0.78321546", "0.7816311", "0.7789072", "0.77777326", "0.77777326", "0.77653325", "0.77648485", "0.77490413", "0.772976", "0.772976", "0.772976", "0.77229416", "0.77218884", "0.771977", "0.771977", "0.7711966", "0.77105695", "0.76966316", "0.76742035", "0.76724094", "0.7662747", "0.76525265", "0.76468456", "0.76468456", "0.76223135", "0.76001334", "0.75939584", "0.7575097", "0.75674367", "0.75548065", "0.75454396", "0.753692", "0.753692", "0.753692", "0.7525277", "0.7501819", "0.7476491", "0.74540085", "0.7411779", "0.73930013", "0.73824596", "0.73773456", "0.7371069", "0.73494387", "0.7341709", "0.731896", "0.7253558" ]
0.7785957
53
Return the short name for the user.
def get_short_name(self):
    return self.first_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return self.username", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def get_short_name(self):\n # The user is identified by their email address\n return self.first_name", "def get_short_name(self):\n # The user is identified by the email address\n return self.email", "def full_name_short(self):\n return \"{}. {}\".format(str(self.user.first_name)[:1], self.user.last_name)", "def get_short_name(self) -> str:\n return self.first_name", "def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()", "def get_full_name(self):\n return self.username", "def get_full_name(self):\n return self.username", "def get_short_name(self):\r\n return self.first_name", "def full_name(self):\n return self.user.get_full_name() or None", "def get_displayname(self):\n return self.full_name or self.user.username", "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)", "def get_short_name(self):\n\n return self.first_name", "def short_name(self) -> str:\r\n\t\treturn f'{self.last_name} {self.first_name}'", "def get_user_fullname(self):\n return self.applicant.userprofile.display_name()", "def get_short_name(self):\n\t\treturn self.email", "def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")", "def name(self) -> str:\n return self.user.name", "def get_user_name(user: User) -> str:\n user_name = user.get(\"display_name\")\n if not user_name:\n user_name = user[\"fullname\"]\n if not user_name:\n user_name = user[\"name\"]\n return user_name", "def short_name(self):\n return self.get(\"short_name\", decode=True)", "def user_name(self):\n return lamin_user_settings().name", "def get_short_name(self):\n return self.last_name", "def display_name(self) -> str:\n return self.requester.username", "def full_name(self, obj: User) -> str:\n return obj.get_full_name()", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def get_short_name(self):\n return self.full_name.split(' ')[0]", "def get_short_name(self):\n\n return self.email", "def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def shortname(self):\n return self.get(\"shortName\")", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def user_name(self):\n return self._user_name", "def getShortName(self) -> str:\n return self.short_name", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def user_name(self):\n\n return self._user_name", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def 
get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def get_short_name(self):\n return self.name", "def __str__(self):\n return self.user.get_full_name()", "def short_displayname(self):\n return self.get_short_displayname()", "def get_name(self):\n return self.user.username if self.user.username else self.user.email", "def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()", "def get_short_name(self):\n\n return self.name", "def get_short_name(self):\n\n return self.name", "def get_short_name(self):\r\n return self.name", "def __str__(self) -> str:\n\n return self.user.get_full_name()", "def get_user_fullname(self):\n member = self.get_user()\n if member:\n return member.getProperty('fullname')", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def get_current_user_full_name(self):\n user_service = self.runtime.service(self, 'user')\n xb_user = user_service.get_current_user()\n\n return xb_user.full_name", "def get_full_name(self):\n return self.name + \" \" + self.email", "def get_short_name(self):\n\n return self.name", "def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)", "def getName(self):\n return self.__username", "def display_name(self):\n return self.settings['displayName']", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def getUserName(self):\n userType = self.env['res.users']\n \n uiUser = userType.browse(self._uid)\n return uiUser.name", "def get_full_name(self, include_title: bool = True) -> str:\n\n full_name = self.user.get_full_name()\n if include_title and self.title:\n title = self.get_title_repr()\n return f\"{full_name}, {title}\"\n return full_name", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")", "def get_full_name(self):\n return u'%s %s' % (self.first_name, self.last_name)", "def full_name(self):\n return self.first_name + \" \" + self.last_name", "def get_full_name(self):\n\n return self.name", "def get_short_name(self):\n return self.firstname\n\n # allows assignment property to the image value\n # from the view", "def username(self) -> str:", "def username(self) -> str:", "def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]", "def first_name(self, instance):\r\n return instance.user.first_name", "def get_full_name(self):\n return self.name+self.last_name" ]
[ "0.90364677", "0.90364677", "0.90364677", "0.8846686", "0.8816478", "0.8637505", "0.84531367", "0.83548844", "0.82865363", "0.82676977", "0.82676977", "0.820093", "0.8189229", "0.8159965", "0.8155279", "0.81156635", "0.81005275", "0.804605", "0.80386364", "0.79630274", "0.7917967", "0.7880938", "0.78530526", "0.78371847", "0.78349924", "0.7833075", "0.7826769", "0.7816021", "0.7812947", "0.7794141", "0.7790537", "0.77707374", "0.776698", "0.77555186", "0.7742493", "0.77299327", "0.76493794", "0.76340085", "0.7620265", "0.75958514", "0.75784844", "0.75784844", "0.75784844", "0.75784844", "0.75784844", "0.75784844", "0.75784844", "0.75784844", "0.7577557", "0.75664866", "0.7536361", "0.75255126", "0.75156915", "0.75156915", "0.75096273", "0.74933636", "0.7478253", "0.7472163", "0.74299693", "0.74258065", "0.7415603", "0.7415093", "0.73733306", "0.73729676", "0.73654014", "0.73654014", "0.73654014", "0.73654014", "0.73654014", "0.73339367", "0.7304325", "0.72763133", "0.72763133", "0.72763133", "0.72712064", "0.72708654", "0.72701323", "0.7268243", "0.72606117", "0.72606117", "0.72581613", "0.72501135", "0.7247177" ]
0.8196602
28
Send an email to this user.
def email_user(self, subject, message, from_email=None, **kwargs):
    send_mail(subject, message, from_email, [self.email], **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None, **kwargs):\r\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\r\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_to_user(self, subject, message, sender=None, **kwargs):\n send_mail(subject, message, sender, [self.email], **kwargs)", "def email_user(self, subject, message,\n from_email=settings.DEFAULT_FROM_EMAIL, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None):\n send_mail(subject, message, from_email, [self.username])", "def email_user(self, subject, message, from_email=None):\n\t\tsend_mail(subject, message, from_email, [self.email])", "def email_user(self, subject, message, from_email=None, **kwargs):\n\t\tsend_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject, message, from_email=None, **kwargs):\n send_mail(subject, message, from_email, [self.email], **kwargs)", "def email_user(self, subject: str, message: str, from_email: str = None) -> None:\n send_mail(subject, message, from_email, [self.email])", "def send_user_email(user, subject, template_name, context=None):\n\n if context is None:\n context = {}\n\n context['user'] = user\n\n to = (user.email,)\n\n send(subject, to, template_name, context)", "def email(self, email_address, message):\n self.server.sendmail(self.username, email_address, message)", "def send_email(self):\n message = MIMEText(self.email_body, 'plain', 'utf-8')\n\n message['Subject'] = self.email_subject\n message['From'] = gmail_user\n message['To'] = ', '.join(self.recipients)\n\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n\n server.login(gmail_user, gmail_password)\n\n server.sendmail(message['From'], self.recipients, message.as_string())\n\n server.close()\n\n print('Email sent!')\n except Exception as err:\n # TODO Write error to log file\n raise err", "def send(self):\n return get_current_sender().sendmail(self)", "def send_email(self, message):\n pass", "def send_message(user_id, name, user_info, subject, body):\n send_mail(subject, body, settings.SERVER_EMAIL, [\"%s <%s>\" % (name, user_id)],\n fail_silently=False, html_message=body)", "def send_email(self, email_from, email_to, message):\n logging.info(\"Attempting to send email from \" + email_from + \" to \" + email_to)\n self.conn.sendmail(email_from, 
email_to, message)\n logging.info(\"Email sent\")", "def send(self, email):\r\n smtp = smtplib.SMTP(self.server, self.port)\r\n smtp.ehlo()\r\n \r\n if self.tls:\r\n smtp.starttls()\r\n smtp.ehlo()\r\n\r\n if self.user and self.passwd:\r\n smtp.login(self.user, self.passwd)\r\n\r\n smtp.sendmail(email.from_address, email.to + email.ccs, str(email))\r\n if email.bccs:\r\n email.root['X-antroy-sent'] = \"True\"\r\n smtp.sendmail(email.from_address, email.bccs, str(email))\r\n del email.root['X-antroy-sent']\r\n smtp.quit()", "def send(self):\n return send_mail(self.subject, self.message, self.sender, self.recipients, fail_silently=False)", "def send_activation_email(self, user):\n activation_key = self.get_activation_key(user)\n context = self.get_email_context(activation_key)\n context[\"user\"] = user\n subject = render_to_string(\n template_name=self.email_subject_template,\n context=context,\n request=self.request,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = \"\".join(subject.splitlines())\n message = render_to_string(\n template_name=self.email_body_template,\n context=context,\n request=self.request,\n )\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def send_mail(email):\n return email.send()", "def sending(self, message):\n sending_mail.send(sender=self.__class__, message=message)", "def send_activation_email(self, user):\n\t\tactivation_key = self.get_activation_key(user)\n\t\tcontext = self.get_email_context(activation_key)\n\t\tcontext.update({\n\t\t\t'user': user\n\t\t})\n\t\tsubject = render_to_string(self.email_subject_template,\n\t\t\t\t\t\t\t\t context)\n\t\t# Force subject to a single line to avoid header-injection\n\t\t# issues.\n\t\tsubject = ''.join(subject.splitlines())\n\t\tmessage = render_to_string(self.email_body_template,\n\t\t\t\t\t\t\t\t context)\n\t\tuser.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)", "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def send_mail(self, subject):\r\n pass", "def send_mail_to_user(user,\n template,\n template_context=None,\n delay=False,\n **kwargs):\n function = send_mail.delay if delay is True else send_mail\n\n if user.email and user.email_validated:\n return function(user.email, template, template_context=template_context, **kwargs)", "def send_email(self, email):\n\n if not isinstance(email, str):\n raise TypeError('type of email must be str not %s' % type(email))\n\n message = self.get_message(email)\n self.server.send_message(message)", "def send(self, **kwargs):\n if hasattr(self.object, 'member'):\n self.add_to(self.object.member.user.email)\n elif hasattr(self.object, 'membership'):\n self.add_to(self.object.created_by.email)\n return super(GrantedAccessMailer, self).send(**kwargs)", "def send_mail(self, address, title, message):\n pass", "def sent(self, message):\n sent_mail.send(sender=self.__class__, message=message)", "def send_email_via_api(self, to, subject, message):\n\n return self.mail.send(to, subject, message)", "def send_new_email(user):\n token = user.get_token()\n message = Message(\n 'Verify Your New Email',\n 
sender='storcwebsite@gmail.com',\n recipients=[user.temp_email])\n message.body = f\"The email address associated with your Storc \" \\\n f\"account has changed.\\n\\nTo verify your new email address, \" \\\n f\"please click the link below:\\n\\n\" \\\n f\"{url_for('users.new_email', token=token, _external=True)}\"\n mail.send(message)", "def send_email(self, to, content):\r\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(self.from_, self.password)\r\n server.sendmail(self.from_, to, content)\r\n speak(\"Email has been sent Succesfully!\")\r\n return \"None\"", "def send_confirmation_email(user_pk):\n pass", "def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)", "def send_message(self, message, send_to, subject):\n message = message.mime()\n\n message['From'] = self.email_address\n message['To'] = send_to\n\n message['Subject'] = subject\n\n self._login()\n self.server.sendmail(self.email_address, send_to, message.as_string())\n self._logout()", "def send_mail(subject):\r\n obj = EmailNotification().emailobj()\r\n obj.send_mail(subject)", "def email_signup_user(email, msg, settings, message_data):\r\n from bookie.lib.message import ActivationMsg\r\n msg = ActivationMsg(email, msg, settings)\r\n status = msg.send(message_data)\r\n if status == 4:\r\n from bookie.lib.applog import SignupLog\r\n trans = transaction.begin()\r\n SignupLog(SignupLog.ERROR,\r\n 'Could not send smtp email to signup: ' + email)\r\n trans.commit()", "def post(self):\n return send_email(request.args)", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "async def send_to_user(self, user: User, msg: Msg, address: str = None):\n if address is None:\n address = user.current_address\n\n await self.send(msg, address)", "def send_confirmation_email(self, user):\n verification_token = self.gen_verification_token(user)\n subject = 'Welcome @{}! Verify your account to start using Comparte Ride'.format(user.username)\n from_email = 'Comparte Ride <noreply@comparteride.com>'\n content = render_to_string(\n 'emails/users/account_verification.html',\n {\n 'token': verification_token,\n 'user': user\n })\n msg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n msg.attach(content, 'text/html')\n msg.send()\n print(\"Sending email\")", "def send_email(self, to, subject, message):\n\n email_to = \"james@vixal.net\"\n try:\n mx_alarm = AlertEmail(email_to, self.subject, self.message)\n mx_alarm.send()\n print(\"\\t{} |{}| Successfully sent email.\".format(Timer.OK, self.tinfo['name']))\n return True\n except Exception as e:\n print(\"\\t{} Exception in send_email! 
{}\".format(Timer.FAIL, e))", "def send_email( user, password ):\n \n mail = Mailer( host = EMAIL['host'], \n port = EMAIL['port'],\n use_tls = EMAIL['use_tls'], \n usr = EMAIL['user'], \n pwd = EMAIL['password']\n )\n \n message = Message( From = 'help@rxmedaccess.com',\n To = [user.email],\n Subject = \"Password Reset\"\n )\n \n body = \"\"\"Your new password for {} is {}\n You can reset it to what you like on your settings page once you log in with\n this password\n \"\"\".format(__name__, password )\n\n message.Body = body\n try:\n mail.send(message)\n except Exception as e:\n log.error( 'Send mail error: {}'.format( str(e) ) )", "def send_confirmation_email(self, *args, **kwargs):\n raise NotImplementedError", "def send_message(self, subject, body):\n headers = [\n \"From: \" + self.email,\n \"Subject: \" + subject,\n \"To: \" + self.email,\n \"MIME-Version: 1.0\",\n \"Content-Type: text/html\"]\n headers = \"\\r\\n\".join(headers)\n self.session.sendmail(\n self.email,\n self.email,\n headers + \"\\r\\n\\r\\n\" + body)", "def send_confirmation(self):\r\n c.user.email_validated = False\r\n c.user.confirmation_code = random_key(6)\r\n c.user._commit()\r\n emailer.confirmation_email(c.user)", "def _send_email(self, confirmation_profile, url,\n subject, text_template, html_template,\n send_to, **kwargs):\n current_site = Site.objects.get_current()\n email_kwargs = {'activation_key': confirmation_profile.activation_key,\n 'domain': current_site.domain,\n 'activate_url': url,\n 'login_url': reverse('users.login'),\n 'reg': 'main'}\n email_kwargs.update(kwargs)\n\n # RegistrationProfile doesn't have a locale attribute. So if\n # we get one of those, then we have to get the real profile\n # from the user.\n if hasattr(confirmation_profile, 'locale'):\n locale = confirmation_profile.locale\n else:\n locale = confirmation_profile.user.profile.locale\n\n @email_utils.safe_translation\n def _make_mail(locale):\n mail = email_utils.make_mail(\n subject=subject,\n text_template=text_template,\n html_template=html_template,\n context_vars=email_kwargs,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to_email=send_to)\n\n return mail\n\n email_utils.send_messages([_make_mail(locale)])", "def __send_message(self, message):\n logging.debug(\"Sending message\")\n try:\n message = self.__email_api.messages.send(message=message)\n return message\n except Error as error:\n logging.error('An error occurred emailing a user: {0}'.format(error))\n raise error", "def send_mail(self, html):\n message = Message(\n From=self._config['mail']['address'], To=self._config['mail']['to'],\n Subject=self._config['mail']['subject']\n )\n message.Html = html\n return self.sender.send(message)", "def send_mail(user, subject, message, from_email, recipient_list, fail_silently=False,\\\n auth_user=None, auth_password=None, connection=None, html_message=None):\n message = smart_text(gpg.sign(message))\n try:\n Profile.objects.get(user= user).pgp_gpg_public_key\n message = smart_text(gpg.encrypt(message))\n except:\n pass\n send_email_django(subject, message, from_email, recipient_list, fail_silently,\\\n auth_user, auth_password, connection, html_message)\n return True", "def send(self, to_addrs, subject, message, from_addr=None):\n if not from_addr: from_addr = self.user\n data = \"From: %s\\nTo: %s\\nSubject: %s\\n\\n%s\" \\\n % (from_addr, to_addrs, subject, message)\n try:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo() # This must be done before and after starttls().\n 
server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\n except:\n raise\n try:\n server.quit() # This always fails and can safely be ignored.\n except:\n pass", "def send_email(self, body=None, subject=None, to=list, cc=None, bcc=None,\n send_as=None, attachments=None):\n email = Message(self.access_token, body, subject, to, cc=cc, bcc=bcc, sender=send_as)\n\n if attachments is not None:\n for attachment in attachments:\n email.attach(attachment.get('bytes'), attachment.get('name'))\n\n email.send()", "def send(self):\n ReferralActivity.objects.create(\n actor=self.user,\n verb=ReferralActivityVerb.CREATED,\n referral=self,\n )\n # Confirm the referral has been sent to the requester by email\n Mailer.send_referral_saved(self)\n # Send this email to all owners of the unit(s) (admins are not supposed to receive\n # email notifications)\n for unit in self.units.all():\n contacts = unit.members.filter(\n unitmembership__role=UnitMembershipRole.OWNER\n )\n for contact in contacts:\n Mailer.send_referral_received(self, contact=contact, unit=unit)", "def send_email(subject, sender, recipients, text_body, html_body):\n\t\tmsg = Message(subject, sender=sender, recipients=recipients)\n\t\tmsg.body = text_body\n\t\tmsg.html = html_body\n\t\tmail.send(msg)", "def send_confirmation_email(user_pk):\n user = User.objects.get(pk=user_pk)\n verification_token = gen_verification_token(user)\n subject = 'Welcome @{}! Verify your account'.format(user.username)\n from_email = 'Gym Admin <frameworkdjango7@gmail.com>'\n content = render_to_string(\n 'users/account_verification.html',\n {'token': verification_token, 'user': user}\n )\n msg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n msg.attach_alternative(content, 'text/html')\n msg.send()", "def send_email(my_email, password, message):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(my_email, password)\n # send from my_email to my_email (from, to, message)\n server.sendmail(my_email, my_email, message)\n server.quit()", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def send_verify_email(user):\n token = user.get_token()\n message = Message(\n 'Verify Your Email',\n sender='storcwebsite@gmail.com',\n recipients=[user.email])\n message.body = f\"Thanks for signing up with Storc!\\n\\nTo verify \" \\\n f\"your email address, please click the link below:\\n\\n\" \\\n f\"{url_for('users.verify_email', token=token, _external=True)}\"\n mail.send(message)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def send(self):\n logger.debug('Sending Email')\n self.mimepgp.send()", "def send_confirmation_email(user_pk):\n user = User.objects.get(pk= user_pk)\n verifiaction_token = gen_verification_token(user)\n print(\"Sending email\")\n subject = 'Wellcome @{}! 
Verfiy your account to start using Comparte Ride'.format(user.username)\n from_email = 'Comparte Ride <noreply@comparteride.com>'\n text_content = render_to_string('emails/users/account_verification.html',\n {'token': verifiaction_token, \n 'user': user})\n msg = EmailMultiAlternatives(subject, text_content, from_email, [user.email])\n msg.attach_alternative(text_content, 'text/html')\n msg.send()", "def send_confirmation_email(user_pk):\n user = User.objects.get(pk=user_pk)\n type = 'email_confirmation'\n token = token_generation(user, type)\n subject = 'Welcome @{}! Verify your account'.format(user.username)\n from_email = 'Facebook <Facebook.com>'\n content = render_to_string(\n 'users/account_verification.html', {'token': token, 'user': user})\n msg = EmailMultiAlternatives(subject, content, from_email, [user.email])\n msg.attach_alternative(content, 'text/html')\n msg.send()", "def send_mail(from_email, to_emails, subject, plain_body, html_body):\n\n # Implementation goes here\n # ...", "def __send_verification(self, email):\r\n user = User.getUser(email.lower())\r\n if user is None or user.verified:\r\n self.set_error(constants.STATUS_BAD_REQUEST, message=None, url=\"/\")\r\n return\r\n user.verificationCode = b64encode(CryptoUtil.get_verify_code(), \"*$\")\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'code': user.verificationCode,\r\n 'url': constants.VERIFICATION_URL\r\n }\r\n template = self.jinja2_env.get_template('verificationemail.jinja')\r\n message = mail.EmailMessage()\r\n message.sender = constants.SENDER_ADDRESS\r\n message.to = user.email\r\n message.subject = 'Please verify your address'\r\n message.body = template.render(template_values)\r\n message.send()\r\n user.put()", "def send_activation_email(self):\n ctx_dict = {\n 'activation_key': self.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\n 'user': self.user,\n 'SITE_URL': settings.SITE_URL,\n }\n subject = render_to_string('accounts/activation_email_subject.txt', ctx_dict)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n \n message = render_to_string('accounts/activation_email_body.html', ctx_dict)\n\n msg = EmailMultiAlternatives(subject, message, None, [self.user.email])\n msg.attach_alternative(message, \"text/html\")\n msg.send()", "def email(self, instance):\r\n return mark_safe('<a href=\"mailto:{0}\">{1}</a>'.format(\r\n instance.user.email, instance.user.email,\r\n ))", "def _send_registration_email(request, user, acct_type):\n current_site = get_current_site(request)\n subject = \"Activate your PuPPy Mentorship Account\"\n\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n activation_token = account_activation_token.make_token(user)\n\n url_token = uid.decode('utf-8') + '/' + activation_token\n\n message = render_to_string(\n 'mentorship_profile/activation_email.html', {\n \"user\": user,\n \"domain\": current_site.domain,\n \"account_type\": acct_type,\n \"url_token\": url_token\n }\n )\n user.email_user(subject, message)", "def send_mail(to, subject, html, text, reply_to=None, sender=SENDER):\n\n from caravel.utils import principals\n\n # Verify that we are not sending spam to people.\n if not (isinstance(to, principals.Principal) and to.valid):\n raise ValueError(\"{!r} does not consented to email.\".format(to))\n\n # Verify that we are not sending spam from people.\n if reply_to:\n if not (isinstance(reply_to, principals.Principal) and reply_to.valid):\n raise ValueError(\"{!r} has not consented to send email.\"\n 
.format(reply_to))\n\n # Actually send the message to the user.\n _send_raw_mail(\n to=to.email,\n subject=subject,\n html=html,\n text=text,\n reply_to=reply_to.email if reply_to else None,\n sender=sender\n )", "def send_email_to_trial_user_with_link(\n to, context, from_email=settings.DEFAULT_FROM_EMAIL):\n template = EMAIL_DICT['parse_trial_user_resume']['template']\n subject = EMAIL_DICT['parse_trial_user_resume']['subject']\n return threadify(_send, to, context, subject, from_email, template)", "def send_mail(self, **kwargs):\n if kwargs.get('source') is None:\n kwargs['source'] = self.config['default_source']\n self.conn.send_email(**kwargs)", "def email(self):\r\n webbrowser.open(\"mailto: gorm90@gmail.com\")", "def send(self, use_async: bool = True, language: str | None = None):\n if not ENABLED_SEND_EMAILS:\n return\n\n use_async = not self.attaches and use_async\n if self.use_i18n and settings.USE_I18N:\n language = self.get_language()\n translation.activate(language)\n self.subject = \"%s\" % self.subject\n message = self.get_message()\n message_plain = self.get_plain_message(message)\n if use_async:\n self.async_send(message, message_plain)\n else:\n self.sync_send(message, message_plain)", "def send_email(settings, excel):\n Email._set_email(settings, excel)\n Email._send_email_helper(settings, excel)", "def send_mail(self):\n try:\n mail = smtplib.SMTP('smtp.gmail.com', 587)\n mail.ehlo()\n mail.starttls()\n mail.login(self.mail_user, self.mail_pass)\n content = \"Subject: Test %s %s on host %s\\n\\n%s\\n logs are save at localhost path:\\n%s\" % (\n self.test_name, self.event, self.host_name, self.event_details, self.log_path\n )\n mail.sendmail(self.mail_user, self.target_mail, content)\n mail.close()\n except Exception as e:\n self.logger.error(\"Sending mail failed with Error %s\", e)\n\n else:\n self.logger.info(\"Mail sent to %s\", self.target_mail)", "def send_email(form_instance, **kwargs):\n cleaned_data = form_instance.cleaned_data\n\n try:\n from_email = cleaned_data.pop(kwargs[\"from_email_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"from_email_field\")\n try:\n to_email = cleaned_data.pop(kwargs[\"to_email_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"to_email_field\")\n try:\n subject = cleaned_data.pop(kwargs[\"subject_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"subject_field\")\n\n if \"uuid\" in cleaned_data:\n del cleaned_data[\"uuid\"]\n\n if \"form_id\" in cleaned_data:\n del cleaned_data[\"form_id\"]\n\n email_body = \"\".join([\n \"%s: %s\\n\\r\" % (get_label(form_instance, label), value)\n for label, value in cleaned_data.items()\n ])\n send_mail(subject, email_body, from_email, [to_email])", "def quick_email(self, send_to, subject, body, style=None):\n message = Message(body, style=style)\n\n self.send_message(message, send_to, subject)" ]
[ "0.8407289", "0.8407289", "0.8407289", "0.8407289", "0.8407289", "0.8407289", "0.8407289", "0.8407289", "0.83597654", "0.83339775", "0.83339775", "0.8311258", "0.8289855", "0.8277409", "0.82756543", "0.8237377", "0.82188", "0.8208959", "0.72830915", "0.7241861", "0.71827024", "0.7115423", "0.70822084", "0.7048464", "0.7023751", "0.6983798", "0.6974202", "0.691071", "0.6869249", "0.6860032", "0.6855065", "0.6854013", "0.67114604", "0.6649905", "0.66235036", "0.66079456", "0.65906996", "0.6587044", "0.6559602", "0.655513", "0.6549336", "0.65416944", "0.6539062", "0.65374476", "0.6530136", "0.6516128", "0.6501524", "0.64959437", "0.6484811", "0.648324", "0.6478901", "0.64751595", "0.647338", "0.6436797", "0.6419015", "0.64105195", "0.6397055", "0.63833743", "0.63807595", "0.6380094", "0.63777745", "0.6362425", "0.63355327", "0.6333054", "0.63229", "0.6319368", "0.63141847", "0.6310847", "0.6304458", "0.63000005", "0.62934655", "0.62819153", "0.62763214", "0.6265615", "0.6255934", "0.6245557", "0.6245511", "0.62376785", "0.6237487", "0.62347883", "0.6234461", "0.6227772", "0.62230545", "0.62158453", "0.62067133" ]
0.8291159
25
Returns the 2nd largest value from a given list.
def second_largest(values: List[int]) -> int:
    try:
        return sorted(set(values))[-2]
    except IndexError:
        raise ValueError("second_largest() needs at least two distinct values")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2", "def second_largest(number_list):\n for i in range(len(number_list)):\n for j in range(len(number_list) - 1 - i):\n if number_list[j] > number_list[j+1]:\n number_list[j + 1], number_list[j] = number_list[j], number_list[j+1]\n\n return number_list[-2]", "def nth_largest2(a_list, n):\n a_list.sort()\n new_list = a_list[::-1]\n return new_list[n-1]", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 ->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? 
nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def largest_item(list):\n pass", "def find_max(list):\n return find_value_at(list, 0)", "def find_max(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] > ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_max(ls[0:mid])\n m2 = find_max(ls[mid:])\n return m1 if m1 > m2 else m2", "def largest(n,xs):\n return sorted(xs, reverse = True)[:n][::-1]", "def return_max(lst, highest=None):\n if highest is None and len(lst) > 0:\n highest = lst[0]\n if len(lst) <= 1:\n return highest\n highest = max(highest, lst[0])\n return return_max(lst[1:], highest)", "def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)", "def nextMax(value,lista):\n for i in lista:\n if i>value:\n return i\n raise NameError('No value')", "def max(l):\n if l:\n s_list = sorted(l)\n return s_list[-1]\n else:\n raise ValueError(\"list empty\")", "def greatest_difference(num_list):", "def test_find_second_largest(self):\n secondLargestValue = sorted(self.values)[-2]\n valueFound = self.tree.findSecondLargest(self.tree.root)\n self.assertEquals(secondLargestValue, valueFound)", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def max_val_rec(alist):\n ln = len(alist)\n mid = ln//2\n if ln > 2:\n left = max_val_rec(alist[:mid])\n right = max_val_rec(alist[mid:])\n return left if left > right else right\n else:\n return max(alist)", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd", "def largest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a > b, a, b)\r\n else:\r\n return max(stack(*args), axis=0)", "def recurrent_max_value_in_list(lst, max_value):\n if len(lst) == 0:\n return max_value\n elif lst[0] > max_value:\n max_value = lst[0]\n return recurrent_max_value_in_list(lst[1:], max_value)", "def maximum(some_list):\n return max(some_list)", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n most: int = input[0]\n n: int = 1\n while n < len(input):\n if input[n] > most:\n most = input[n]\n n += 1 \n return most", "def largest_two():\n # Add your code below!", "def largest_number_at_least_twice_of_others(nums: [int]) -> int:\n largest = None\n next_largest = None\n\n for idx, num in enumerate(nums):\n if largest is None:\n largest = idx\n continue\n if num > 
nums[largest]:\n next_largest = largest\n largest = idx\n continue\n if next_largest is None or num > nums[next_largest]:\n next_largest = idx\n\n if next_largest is None or (nums[next_largest] * 2) <= nums[largest]:\n return largest\n return -1", "def max(input: list[int]) -> int:\n i = 0\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n\n else:\n while i < len(input):\n j = i + 1\n while j < len(input):\n if input[i] > input[j]:\n if j == len(input) - 1:\n if input[i] >= input[len(input) - 1]:\n return input[i]\n j += 1\n else:\n j += len(input)\n i += 1\n return input[len(input) - 1]", "def maximo(arr):\n maxVal = float('-inf')\n maxIdx = -1\n\n for i in range(len(arr)):\n if arr[i] > maxVal:\n maxVal = arr[i]\n maxIdx = i\n\n return maxVal, maxIdx", "def max_num_in_list(a_list):\n max_number = max(a_list)\n return max_number", "def max_num(num_list):\n\n return max(num_list)", "def give_greatest_spart(cls, spart_list):\n if len(spart_list) == 1:\n return spart_list[0]\n sorted_list = cls.sort_by_dominance(spart_list)\n if not(sorted_list[0] > sorted_list[1]):\n print(\"The two largest elements are non-comparable\")\n return []\n else:\n return sorted_list[0]", "def find_max(data):\n index = 0\n res = data[index]\n for i in range(1, len(data)):\n if data[i] > res:\n res = float(data[i])\n index = i\n else:\n break\n return res, index", "def index_largest(seq):\n assert len(seq) > 0\n x, greatest, index = len(seq), seq[0], 0\n for elem in range(1, x):\n if seq[elem] > greatest:\n greatest = seq[elem]\n index = elem\n return index", "def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index", "def indexOfMax(list):\n max = -np.Infinity\n index = 0\n i = 0\n for value in list:\n if value > max:\n max = value\n index = i\n i += 1\n return index", "def find_max(self):\r\n maxVal = self.items[1]\r\n if maxVal is None:\r\n return None\r\n \r\n for i in range(1,len(self.items)):\r\n if self.items[i] is not None:\r\n if self.items[i] > maxVal:\r\n maxVal = self.items[i]\r\n return maxVal", "def max_list_iter(int_list: Optional[List]) -> Optional[int]:\r\n if int_list is None:\r\n raise ValueError\r\n elif len(int_list) == 0:\r\n return None\r\n elif len(int_list) == 1:\r\n return int_list[0]\r\n else:\r\n maxVal = int_list[0]\r\n for value in int_list:\r\n if value > maxVal:\r\n maxVal = value\r\n return value", "def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)", "def r_max(nxs):\n largest = None\n for i,e in enumerate(nxs):\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if i == 0 or val > largest:\n largest = val\n\n return largest", "def last2(x, y):\n y = np.asarray(y)\n return y[np.argsort(x)][-1]", "def latest(scores: list) -> int:\n return scores[-1]", "def find_largest_diff(list_of_nums):\n largest_diff = 0\n for i in range(len(list_of_nums) - 1):\n diff = abs(list_of_nums[i] - list_of_nums[i+1])\n if diff > largest_diff:\n largest_diff = diff\n\n return largest_diff", "def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i", "def maxVal(item_list, rem_space):\n if item_list == [] or rem_space == 0: # no items or space\n result = (0, ())\n else:\n next_item = item_list[0]\n if next_item.getCost() > rem_space:\n result = maxVal(item_list[1:], rem_space)\n else:\n with_val, with_list = maxVal(item_list[1:],\n 
rem_space-next_item.getCost())\n with_val += next_item.getValue()\n\n without_val, without_list = maxVal(item_list[1:],\n rem_space)\n if with_val > without_val:\n result = (with_val, with_list + (next_item, ))\n else:\n result = (without_val, without_list)\n return result", "def max(self, num_list):\n try:\n max = int(num_list[0])\n\n for number in num_list:\n try:\n if number > max:\n max = number\n except Exception as e:\n print(\"Error\", e)\n\n except Exception as e:\n print(\"Error:\", e)\n\n return max", "def list_max(numbers):\n maxnum = 0\n \n for num in numbers[0:]:\n if num > maxnum:\n maxnum = num\n return maxnum", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def maxNumber(x):\n maxVal = x[0]\n for num in x:\n if maxVal <num:\n maxVal=num\n return maxVal", "def max_pairwise_product_sort(numbers):\n sorted_list = sorted(numbers)\n ans = sorted_list[-1]*sorted_list[-2]\n return ans", "def max_index_of_smaller_number(list, number):\n for i, element in enumerate(list):\n if element >= number:\n return i - 1", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "def highestMax(requestContext, seriesList, n):\n result_list = sorted( seriesList, key=lambda s: max(s) )[-n:]\n\n return sorted(result_list, key=lambda s: max(s), reverse=True)", "def largest(array, n):\n\n #set max as first array element\n max = array[0]\n\n #compare current max with next array element, replace max if next element is larger\n\n for i in range(1, n):\n if array[i] > max:\n max = array[i]\n return max", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == None:\n raise ValueError(\"Must be list\")\n else:\n if len(int_list) < 1:\n return None\n max = int_list[0]\n for val in int_list:\n if val > max:\n max = val\n return max", "def find_max_len_item(the_list):\n return max(the_list, key=len)", "def _get_k_largest(lst, k):\n sorted_lst = sorted([(val, index) for index, val in enumerate(lst)])\n return list(reversed(sorted_lst[-k:]))", "def max_list_iter(int_list): # must use iteration not recursion\n\n if int_list is None:\n raise ValueError\n\n if not int_list:\n return None\n\n if len(int_list) == 1:\n return int_list[0]\n\n max_so_far: object = int_list[0]\n for i in range(len(int_list)):\n if int_list[i] > max_so_far:\n max_so_far = int_list[i]\n\n return max_so_far", "def max_or_zero(list):\n try:\n return max(list)\n except ValueError:\n return 0", "def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)", "def find_max(weather_data):\n if 
len(weather_data) == 0:\n return()\n\n value = float(weather_data[0])\n position = 0\n\n for index, weather in enumerate(weather_data):\n if float(weather) >= value:\n value= float(weather)\n position = index\n\n return(value, position)", "def largest_element(a):\n\n return None", "def find_largest_number_in_list(self, list_with_numbers):\n return 0", "def highestCurrent(requestContext, seriesList, n):\n return sorted( seriesList, key=safeLast )[-n:]", "def nth_largest(a_list, n):\n if n < 1:\n return\n n -= 1 # 0 indexing\n\n pivot = a_list[-1]\n\n bigger = -1\n print a_list\n for i in range(len(a_list)-1):\n if a_list[i] > pivot:\n a_list[bigger+1], a_list[i] = a_list[i], a_list[bigger+1]\n bigger += 1\n # swap pivot with bigger+1 element\n a_list[bigger+1], a_list[-1] = a_list[-1], a_list[bigger+1]\n print a_list\n print bigger\n print n\n\n if n == bigger+1:\n return a_list[n]\n elif n < bigger+1: # element in left sub array\n return nth_largest(a_list[:bigger+1], n+1)\n elif n > bigger+1: # element in right sub array\n return nth_largest(a_list[bigger+2:], n+1-len(a_list[:bigger+2]))", "def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None", "def twoMaxs(lnp):\n\tindex1 = 0\n\tindex2 = 0\n\tcnt = 0\n\tmaxArea = 0\n\tmaxArea2 = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(ew * eh >= maxArea):\n\t\t\tindex1 = cnt\n\t\t\tmaxArea = ew * eh\n\t\tcnt += 1\n\t\n\n\tcnt = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(index1 == cnt):\n\t\t\tcnt += 1\n\t\t\tcontinue\n\t\tif(ew * eh >= maxArea2):\n\t\t\tindex2 = cnt\n\t\t\tmaxArea2 = ew * eh\n\t\tcnt +=1\n\t\n\treturn (index1, index2)", "def maxi(a,b):\n\tif a > b: \n\t\treturn a\n\treturn b", "def personal_best(scores: list) -> int:\n return max(scores)", "def max_info(lst):\n k = []\n maxm = -1\n for i in range(len(lst)):\n if lst[i] == maxm:\n k.append(i)\n if lst[i] > maxm:\n maxm = lst[i]\n k = [i]\n return k", "def findKthLargest(self, nums: List[int], k: int) -> int:\n return sorted(nums)[-k]", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def get_largest_index(student_list, length):\n largest_index = 0\n for i in range(length):\n if student_list[i] > student_list[largest_index]:\n largest_index = i\n return largest_index", "def get_max_index_of_list(a_list):\n if isinstance(a_list, np.ndarray):\n idx = np.argmax(a_list)\n elif isinstance(a_list, list):\n idx=a_list.index(max(a_list))\n return idx", "def getHighest(key, values, num):\n assert isinstance(key, list)\n assert isinstance(values, list)\n assert isinstance(num, int)\n key, values = getSorted(key, values)\n newKey = key[:num]\n newValue = values[:num]\n return newKey, newValue", "def find_max_val_unimodal_arr(unimodal_arr):\n arr = unimodal_arr\n maxfound = False\n if (len(arr) == 0):\n print('empty list')\n return -1\n\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n\n if (len(arr) == 1):\n print('maximum value = ' + str(arr[center]))\n return arr[center]\n\n if (len(arr) == 2):\n print('maximum value = ' + str(arr[left] if arr[left] > arr[right] else arr[right]))\n return arr[left] if arr[left] > arr[right] else 
arr[right]\n\n while (not maxfound):\n if (arr[left] > arr[center]):\n arr = arr[:center]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if (arr[right] > arr[center]):\n arr = arr[center:]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if ((arr[right] <= arr[center]) and (arr[left] <= arr[center])):\n maxfound = True\n\n print('maximum value = ' + str(arr[center]))\n return arr[center]", "def largest_int(numbers):\n\n if numbers == []:\n return \n max_int = numbers[0]\n for number in numbers:\n if number > max_int:\n max_int = number\n \n return max_int", "def GetMax(val, maximum):\n\tval = float(val)\n\tmaximum = float(maximum)\n\treturn max([val, maximum])", "def maxn(a,b):\n\n if a>b:\n return a\n else:\n return b", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def max_val(t):\n # Your code here\n\n def openItem(term):\n newList = []\n\n for item in term:\n if type(item) == int:\n newList.append(item)\n\n else:\n newList += openItem(item)\n\n return newList\n\n sortingList = openItem(t)\n\n maximum = sortingList[0]\n\n for item in sortingList:\n if maximum < item:\n maximum = item\n\n return maximum", "def method2(self, nums):\n N = len(nums)\n inc = 1\n dec = 1\n \n for i in range(1, N):\n if nums[i] > nums[i - 1]:\n dec = inc + 1\n elif nums[i] < nums[i - 1]:\n inc = dec + 1\n \n return max(inc, dec)", "def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]", "def r_max(nxs):\n largest = None\n first_time = True\n for e in nxs:\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if first_time or val > largest:\n largest = val\n first_time = False\n\n return largest", "def secondSmallest(lstItems):\n \n if len(lstItems) == 0:\n return -1;\n \n index = 0#current index in the list\n returnNext = False;#return the next occurence of a value equal to valSmallest\n valSmallest = lstItems[0]#just declaring and initializing valSmallest\n\n for val in lstItems:\n if returnNext == True and val == valSmallest:\n return index;\n\n if numSmaller(lstItems, val) == 0 and len(occurrences(lstItems, val)) > 1:\n returnNext = True;\n valSmallest = val\n elif numSmaller(lstItems, val) == 1:\n iSecondSmallest = index;\n index+= 1\n\n return iSecondSmallest;", "def most_popular(lst):\n lst2 = copy.copy(lst)\n most_pop = []\n\n for j in range(10):\n biggest = 0\n biggest_name = \"\"\n index = 0\n for i in range(len(lst2)):\n if lst2[i][1] > biggest:\n biggest = lst2[i][1]\n biggest_name = lst2[i][0]\n index = i\n most_pop.append((j+1, biggest, biggest_name))\n del lst2[index]\n return most_pop", "def two_oldest_ages(ages: list) -> list:\n return sorted(ages)[-2:]", "def get_max(num_one, num_two):\n temp_a = int(str(num_one) + str(num_two))\n temp_b = int(str(num_two) + str(num_one))\n if temp_a >= temp_b:\n return num_one\n else:\n return num_two", "def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == None: # error handling\n raise ValueError\n elif len(int_list) == 0: # when the list is empty\n return None\n else:\n max_num = int_list[0]\n for num in int_list:\n if num > max_num:\n 
max_num = num\n return max_num", "def get_youngest_student(students):\n youngest_index = 0 \n youngest = students[0][3]\n for counter, row in enumerate(students[1:], 1):\n if int(row[3]) > int(youngest):\n youngest = students[counter][3]\n youngest_index = counter \n return students[youngest_index]", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == []:\n return None\n elif int_list == None:\n raise ValueError\n max_int = int_list[0]\n \n for i in int_list:\n if i > max_int:\n max_int = i\n return max_int", "def largestNumber(self, nums): \n def string_comp(item1, item2):\n return 1 if str(item1) + str(item2) < str(item2) + str(item1) else -1\n res_list = sorted(nums, key=cmp_to_key(string_comp))\n\n # Catch edge case where list of 0s will produce \"000..\" instead of a single \"0\"\n if set(res_list) == {0}:\n return \"0\"\n return \"\".join([str(i) for i in res_list])" ]
[ "0.79893696", "0.7755604", "0.76443726", "0.75765264", "0.7529355", "0.75202054", "0.75019395", "0.746542", "0.7456516", "0.738916", "0.7388165", "0.733121", "0.7291496", "0.7259317", "0.7139178", "0.713446", "0.70853615", "0.7072863", "0.6986864", "0.6984842", "0.6957062", "0.6955505", "0.68840003", "0.6868519", "0.6793554", "0.6764099", "0.6757375", "0.67570126", "0.6728527", "0.67183864", "0.6712471", "0.67079794", "0.667056", "0.6625989", "0.66050196", "0.66024584", "0.6593408", "0.652491", "0.6519156", "0.6499097", "0.6491467", "0.64904857", "0.6428486", "0.6412654", "0.6410921", "0.64033026", "0.64030945", "0.63953054", "0.63876814", "0.63493824", "0.6325026", "0.632107", "0.63187176", "0.62997794", "0.62997794", "0.62838036", "0.6278713", "0.6249483", "0.62418365", "0.6239147", "0.62388194", "0.62376726", "0.6229499", "0.62231505", "0.6201587", "0.61935383", "0.6192398", "0.6183571", "0.61822855", "0.61786747", "0.61733705", "0.61717814", "0.6148141", "0.6146675", "0.61452836", "0.61359674", "0.6133789", "0.61309916", "0.61272055", "0.6123429", "0.610781", "0.6103622", "0.6102576", "0.60888815", "0.60869926", "0.60661983", "0.6060406", "0.606039", "0.6049546", "0.60427976", "0.6009599", "0.5994403", "0.59610546", "0.5951626", "0.594731", "0.5945929", "0.5938512", "0.5937433", "0.59290403", "0.5921943" ]
0.8168143
0
Find the program in the system path.
def FindEnv(progname):
    for path in os.environ['PATH'].split(':'):
        fullname = os.path.join(path, progname)
        if os.access(fullname, os.X_OK):
            return fullname
    raise AssertionError(
        "Could not find an executable named '%s' in the system path" % progname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _which(self, program):\n\n def is_exe(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n os.environ[\"PATH\"] += os.pathsep + '%s/bin/' % basedir\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n print 'ishakesumd not found, build it or place it in the PATH before using this tool.'\n exit(1)", "def find_program(name):\r\n # See MSDN for the REAL search order.\r\n base, ext = os.path.splitext(name)\r\n if ext:\r\n exts = [ext]\r\n else:\r\n exts = ['.bat', '.exe']\r\n for directory in os.environ['PATH'].split(os.pathsep):\r\n for e in exts:\r\n fname = os.path.join(directory, base + e)\r\n if os.path.exists(fname):\r\n return fname\r\n return None", "def find_program_file():\n value = sys.argv[0]\n msg = \"Failed to determine absolute pathname of program!\"\n if not os.path.isabs(value):\n candidates = which(value)\n if not candidates:\n raise Exception(msg)\n value = candidates[0]\n if not os.access(value, os.X_OK):\n raise Exception(msg)\n return value", "def which(program):\n\t# requirements = os\n\tis_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))\n\tfor path in os.environ['PATH'].split(os.pathsep):\n\t\tpath = path.strip('\"')\n\t\texe_file = os.path.join(path, program)\n\t\tif is_exe(exe_file):\n\t\t\treturn exe_file\n\tif is_exe(program):\n\t\treturn os.path.abspath(program)\n\treturn None", "def which(program, program_name):\n fpath, fname = os.path.split(program)\n if fpath:\n if __is_exe__(program):\n return program\n elif (__is_script__(program)):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if __is_exe__(exe_file):\n return exe_file\n logger.error(program_name + \" path = \" + fpath +\n \" not locatable in the path of directory specified\")\n return None", "def which(program):\r\n def is_exe(fpath):\r\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\r\n\r\n fpath, fname = os.path.split(program)\r\n if fpath:\r\n if is_exe(program):\r\n return program\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n exe_file = os.path.join(path, program)\r\n if is_exe(exe_file):\r\n return exe_file\r\n\r\n return None", "def find_program(name):\r\n return name", "def find_program(binary_name):\n pth = os.path.abspath(__file__)\n\n # Split off the name and the directory...\n pth, notused = os.path.split(pth)\n pth, notused = os.path.split(pth)\n pth = os.path.join(pth, \"programs\", binary_name)\n pth = os.path.normpath(pth)\n\n log.debug(\"Checking for program %s\", binary_name)\n if not os.path.exists(pth) or not os.path.isfile(pth):\n log.error(\"No such file: '%s'\", pth)\n raise PartitionFinderError\n log.debug(\"Found program %s at '%s'\", binary_name, pth)\n return pth", "def which(program):\r\n import os\r\n def is_exe(fpath):\r\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\r\n\r\n fpath, fname = os.path.split(program)\r\n if fpath:\r\n if is_exe(program):\r\n return program\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n path = path.strip('\"')\r\n exe_file = os.path.join(path, program)\r\n if is_exe(exe_file):\r\n return exe_file\r\n\r\n return None", "def which(program):\n\n\tfpath, 
fname = os.path.split(program)\n\tif fpath:\n\t\tif is_exe(program):\n\t\t\treturn program\n\telse:\n\t\tfor path in os.environ[\"PATH\"].split(os.pathsep):\n\t\t\tpath = path.strip('\"')\n\t\t\texe_file = os.path.join(path, program)\n\t\t\tif is_exe(exe_file):\n\t\t\t\treturn exe_file\n\n\treturn None", "def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n return None", "def which(program):\n if (os.path.isabs(program) and os.path.isfile(program) and\n os.access(program, os.X_OK)):\n return [program]\n\n candidates = []\n locations = os.environ.get(\"PATH\").split(os.pathsep)\n for location in locations:\n candidate = os.path.join(location, program)\n if os.path.isfile(candidate) and os.access(candidate, os.X_OK):\n candidates.append(candidate)\n return candidates", "def find_program(basename):\n names = [basename]\n if os.name == 'nt':\n # Windows platforms\n extensions = ('.exe', '.bat', '.cmd')\n if not basename.endswith(extensions):\n names = [basename+ext for ext in extensions]+[basename]\n for name in names:\n path = is_program_installed(name)\n if path:\n return path", "def which(program):\n\n def is_bin(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_bin(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n bin_file = os.path.join(path, program)\n if is_bin(bin_file):\n return bin_file\n\n return None", "def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def which(program):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def which(program):\n import os\n def is_exe(fpath):\n return os.path.exists(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def findInPath (exe):\n\n\tfor dirname in os.environ['PATH'].split (os.path.pathsep):\n\t\tpossible = os.path.join (dirname, exe)\n\t\tif os.path.isfile (possible):\n\t\t\treturn possible\n\n\t# Not found\n\traise NotFoundInPathException (exe)", "def real_which(program):\n which_file = rsvprobe.which(program)\n if which_file:\n return os.path.realpath(which_file)\n else:\n return None", "def which(program):\n fpath, fname = os.path.split(program)\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n 
return exe_file\n\n raise ExecutableNotFound(program)", "def _Which(program, paths):\n if sys.platform == 'win32' and not program.lower().endswith('.exe'):\n program += '.exe'\n\n for path in paths:\n candidate = os.path.join(os.path.normpath(path), program)\n if os.path.isfile(candidate):\n return candidate\n\n return None", "def shutil_which(pgm):\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, pgm)\n if os.path.exists(p) and os.access(p, os.X_OK):\n return p", "def shutil_which(pgm):\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, pgm)\n if os.path.exists(p) and os.access(p, os.X_OK):\n return p", "def which(program):\n\n def is_exe(fpath):\n \"\"\"\n Return True is the fpath exists and is executable. This is needed since\n executables are specifed in the JSON files, but not the path to them.\n The executables may be in different locations based on which PC is\n running this.\n \"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def scan_path(executable=\"mongod\"):\n for path in os.environ.get(\"PATH\", \"\").split(\":\"):\n path = os.path.abspath(path)\n executable_path = os.path.join(path, executable)\n if os.path.exists(executable_path):\n return executable_path", "def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None", "def find_program(ctx, names, paths=None, *, quieter=0):\n\n if paths is None:\n paths = os.environ['PATH'].split(os.pathsep)\n\n # If we're running on windows, we need to append '.exe' to the filenames\n # that we're searching for.\n if sys.platform == 'win32':\n new_names = []\n for name in names:\n if \\\n not name.endswith('.exe') or \\\n not name.endswith('.cmd') or \\\n not name.endswith('.bat'):\n new_names.append(name + '.exe')\n new_names.append(name + '.cmd')\n new_names.append(name + '.bat')\n new_names.append(name)\n names = new_names\n\n for name in names:\n ctx.logger.check('looking for program ' + name, verbose=quieter)\n\n filename = fbuild.path.Path(name)\n if filename.exists() and filename.isfile():\n ctx.logger.passed('ok %s' % filename, verbose=quieter)\n return fbuild.path.Path(name)\n else:\n for path in paths:\n filename = fbuild.path.Path(path, name)\n if filename.exists() and filename.isfile():\n ctx.logger.passed('ok %s' % filename, verbose=quieter)\n return fbuild.path.Path(filename)\n\n ctx.logger.failed(verbose=quieter)\n\n raise MissingProgram(names)", "def which(program):\n\n def is_exe(fpath):\n found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n if not found and sys.platform == \"win32\":\n fpath = fpath + \".exe\"\n found = os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n return found\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n logger.debug(\"Found executable: \" + str(program))\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = os.path.expandvars(os.path.expanduser(path)).strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n logger.debug(\"Found executable: \" + str(exe_file))\n return 
exe_file\n\n return None", "def _which(program):\n # Borrowed from:\n # https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python\n # XXX May need more porting to handle .exe extensions on Windows\n\n fpath, _fname = os.path.split(program)\n if fpath:\n if _is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if _is_exe(exe_file):\n return exe_file\n\n return None", "def which(program):\n def is_exe(fpath):\n \"\"\"Determine wether file at given path is executable.\"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def which():\n\n location = None\n if os.path.basename(_git_path) != _git_path:\n if os.path.isfile(_git_path):\n location = _git_path\n else:\n paths = [x for x in os.environ[\"PATH\"].split(os.pathsep) if not x.isspace()]\n for path in paths:\n exe = os.path.join(path, _git_path)\n if os.path.isfile(exe):\n location = exe\n break\n return location", "def path_which(args):\n print(header(\"$PATH Lookup: {}\".format(args.look)))\n loop_fmt = \"{color}{path}\"\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n\n msg = check_exec(part, args.look, args.version)\n if msg:\n print(header(loop_fmt.format(color=color, path=part), '-'))\n print(msg)\n cnt = (cnt + 1) % len(CODES)", "def where(self, exe, path=None):\n if exe is None:\n return None\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n\n def is_executable(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n if sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(exe)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n exe_name = exe + ext\n for p in paths:\n exe_path = os.path.join(p, exe_name)\n if is_executable(exe_path):\n return exe_path\n\n return None", "def find_executable(name, paths):\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n # If not found, just assume it's in the PATH.\n return name", "def find_executable(name):\n for dir in os.environ.get('PATH', '').split(os.pathsep):\n if not dir:\n continue\n fn = os.path.abspath(os.path.join(dir, name))\n if os.path.exists(fn):\n return os.path.abspath(fn)", "def find_executable(name):\n if os.path.isabs(name):\n return name if os.path.isfile(name) else None\n else:\n if iswin32:\n paths = os.environ['Path'].split(';')\n if '' not in paths and '.' 
not in paths:\n paths.append('.')\n try:\n systemroot = os.environ['SYSTEMROOT']\n except KeyError:\n pass\n else:\n paths = [re.sub('%SystemRoot%', systemroot, path)\n for path in paths]\n else:\n paths = os.environ['PATH'].split(':')\n tryadd = []\n if iswin32:\n tryadd += os.environ['PATHEXT'].split(os.pathsep)\n tryadd.append(\"\")\n\n for x in paths:\n for addext in tryadd:\n p = os.path.join(x, name) + addext\n try:\n if os.path.isfile(p):\n return p\n except Exception:\n pass\n return None", "def search_whohas(pac):\n p = Popen(['which', 'whohas'], stdout=PIPE).communicate()[0]\n if p:\n wh = p.strip() #FFFFFFFFF newline.\n else:\n wh = os.getcwd()+\"/whohas-0.29/program/whohas\"\n os.system(wh+\" \"+pac)", "def find_program(assembler_name, program, assembler_arg, option=True):\n if assembler_arg == assembler_name and option and not which(program):\n err = (textwrap.dedent(\"\"\"\n We could not find the \"{}\" program. You either need to\n install it or you need to adjust the PATH environment\n variable with the \"--path\" option so that aTRAM can\n find it.\"\"\")).format(program)\n sys.exit(err)", "def which(executable):\n if 'PATH' in os.environ:\n envpath = os.environ['PATH']\n else:\n envpath = os.defpath\n PATH = envpath.split(os.pathsep)\n\n locations = PATH + [\n '/usr/local/bin',\n '/bin',\n '/usr/bin',\n '/usr/local/sbin',\n '/usr/sbin',\n '/sbin',\n ]\n\n for location in locations:\n executable_path = os.path.join(location, executable)\n if os.path.exists(executable_path):\n return executable_path", "def Which(binary, path=None):\n if path is None:\n path = os.environ.get('PATH', '')\n for p in path.split(':'):\n p = os.path.join(p, binary)\n if os.access(p, os.X_OK):\n return p\n return None", "def which(name):\n # Inspired by https://twistedmatrix.com/trac/browser/tags/releases/\n # twisted-8.2.0/twisted/python/procutils.py\n # pylint: disable=W0141\n result = []\n path = os.environ.get('PATH', None)\n if path is None:\n return []\n for pdir in os.environ.get('PATH', '').split(os.pathsep):\n fname = os.path.join(pdir, name)\n if os.path.isfile(fname) and os.access(fname, os.X_OK):\n result.append(fname)\n return result[0] if result else None", "def find_executable(binary):\n\n\tfor syspath in os.environ.get('PATH', default_path).split(':'):\n\t\tif os.path.exists(os.path.join(syspath, binary)):\n\t\t\treturn os.path.join(syspath, binary)\n\n\treturn None", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def which(cmd):\n for path in os.environ['PATH'].split(os.pathsep):\n path = path.strip('\"')\n cmd_path = os.path.join(path, cmd)\n if os.path.isfile(cmd_path) and os.access(cmd_path, os.X_OK):\n return cmd_path\n\n return None", "def which(fname):\n if \"PATH\" not in os.environ or not os.environ[\"PATH\"]:\n path = os.defpath\n else:\n path = os.environ[\"PATH\"]\n\n for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]:\n p = os.path.abspath(p)\n if os.access(p, os.X_OK) and not os.path.isdir(p):\n return p\n\n p = sp.Popen(\"locate %s\" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n (stdout, stderr) = p.communicate()\n if not stderr:\n for p in stdout.decode().split(\"\\n\"):\n if (\n (os.path.basename(p) == fname)\n and (os.access(p, os.X_OK))\n and (not os.path.isdir(p))\n 
):\n return p", "def which(cls, cmd):\n return get_exe_path(cmd + '.exe')", "def SearchPath(name):\n search_path = os.getenv('PATH', os.defpath).split(os.pathsep)\n for directory in search_path:\n if directory:\n path = os.path.join(directory, name)\n if os.path.isfile(path) and os.access(path, os.X_OK):\n return path\n return None", "def find_tool():\n return shutil.which('nm')", "def which(executable):\n cmd = 'where' if IS_WINDOWS else 'which'\n return run_cmd((cmd, executable), call=False, fail_silently=True)", "def which(cls, cmd):\n abs_path_cmd = None\n if sys.version_info >= (3, 3):\n abs_path_cmd = shutil.which(cmd)\n else:\n abs_path_cmd = find_executable(cmd)\n return abs_path_cmd", "def which(program):\n\n file_path, file_name = os.path.split(program)\n\n if file_path:\n if is_exectuable(program):\n return True\n\n else:\n for path in os.environ['PATH'].split(os.pathsep):\n exe_file = os.path.join(path, program)\n\n if is_executable(exe_file):\n return True\n\n return False", "def _which(executable):\n\n def is_exe(fpath):\n \"\"\"Returns True if the path is an executable\"\"\"\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, _ = os.path.split(executable)\n if fpath:\n if is_exe(executable):\n return executable\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, executable)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def get_executable(self) -> str:\n ...", "def asdf_which(version):\n try:\n cmd = \"asdf where python {}\".format(version)\n python_home = subprocess.check_output(\n cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True\n ).strip()\n except subprocess.CalledProcessError as e:\n handle_asdf_error(e)\n return os.path.join(python_home, \"bin\", \"python\")", "def thepath = getProgramPath(theprog):\r\n\r\n theprog = lower(theprog);\r\n\r\n if strcmp(theprog,'POV-Ray')\r\n # install location for POV-Ray\r\n thepath = '/usr/local/bin';\r\n\r\n else if strcmp(theprog,'quietpov')\r\n # install location for the QuietPOV add-on\r\n thepath = 'C:\\Program Files\\POV-Ray for Windows v3.6\\guiext\\QuietPOV';\r\n\r\n else if strcmp(theprog,'imagemagick')\r\n # install location for ImageMagick\r\n thepath = '/home/kieran/Downloads/ImageMagick-6.8.5-8';\r\n\r\n else if strcmp(theprog,'ffmpeg')\r\n # install location for the ffmpeg library\r\n thepath = '/usr/bin/ffmpeg';\r\n\r\n else\r\n thepath = '';", "def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret", "def find_executable(self, executable):\n\n # Assume standard virtualenv/Conda layout\n if WIN:\n paths = [self._path,\n os.path.join(self._path, 'Scripts'),\n os.path.join(self._path, 'bin')]\n else:\n paths = [os.path.join(self._path, 'bin')]\n\n return util.which(executable, paths)", "def checkForExe(exe):\n exepath = None\n \n # first check if we've been given an absolute path\n if 
len(os.path.split(exe)[0]):\n# print \"CHECK FOR EXE ABS PATH\", exe\n \n if os.path.exists(exe):\n exepath = exe\n \n else:\n # basename\n exe = os.path.basename(exe)\n# print \"SEARCHING FOR BASENAME IN SYS PATH\", exe\n \n if exepath is None:\n # check if exe programme located\n syspath = os.getenv(\"PATH\", \"\")\n syspatharray = syspath.split(\":\")\n found = 0\n for syspath in syspatharray:\n if os.path.exists(os.path.join(syspath, exe)):\n found = 1\n break\n \n if found:\n exepath = exe\n \n else:\n for syspath in EXTENDED_PATH:\n if os.path.exists(os.path.join(syspath, exe)):\n found = 1\n break\n \n if found:\n exepath = os.path.join(syspath, exe)\n \n else:\n exepath = 0\n \n return exepath", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def get_exe(*preference):\n for exe in preference:\n path = distutils.spawn.find_executable(exe)\n if path:\n return path", "def find_executable(executable, path=None):\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n base, ext = os.path.splitext(executable)\n\n if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):\n executable = executable + '.exe'\n\n if not os.path.isfile(executable):\n for p in paths:\n f = os.path.join(p, executable)\n if os.path.isfile(f):\n # the file exists, we have a shot at spawn working\n return f\n return None\n else:\n return executable", "def which(executable):\n if executable.startswith('/'):\n return executable\n\n path = os.environ['PATH'].split(os.pathsep)\n\n for executable_with_ext in _executable_names(executable):\n for entry in path:\n joined = os.path.join(entry, executable_with_ext)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n\n return None", "def checkForExeGlob(exe):\n # check if exe programme located\n syspath = os.getenv(\"PATH\", \"\")\n syspatharray = syspath.split(\":\")\n found = 0\n for syspath in syspatharray:\n matches = glob.glob(os.path.join(syspath, exe))\n if len(matches):\n found = 1\n break\n \n if found:\n exepath = matches[0]\n \n else:\n for syspath in EXTENDED_PATH:\n matches = glob.glob(os.path.join(syspath, exe))\n if len(matches):\n found = 1\n break\n \n if found:\n exepath = matches[0]\n \n else:\n exepath = 0\n \n return exepath", "def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)", "def find_tool():\n return shutil.which('readelf')", "def which(executable):\n def is_executable(path):\n \"\"\"True if path exists and is executable.\"\"\"\n return (os.path.exists(path) and\n not os.path.isdir(path) and\n os.access(path, os.F_OK | os.X_OK))\n\n def normalize(path):\n \"\"\"Return canonical case-normalized path.\"\"\"\n return os.path.normcase(os.path.realpath(path))\n\n def path_list():\n \"\"\"Get executable path list.\"\"\"\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)\n\n def pathext_list():\n \"\"\"Get list of extensions to automatically search.\"\"\"\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)\n\n seen = set()\n\n for path in [normalize(p) for p in path_list()]:\n if path not in seen:\n for ext in [\"\"] + pathext_list():\n full_path = os.path.join(path, executable) + ext\n if is_executable(full_path):\n return full_path\n\n 
seen.add(path)\n\n return None", "def which(file, env=os.environ):\n if file is None:\n return None\n for path in env.get('PATH', '').split(os.pathsep):\n if path:\n result = os.path.join(path, file)\n if os.path.exists(result):\n return os.path.realpath(result)\n return None", "def findTryExec(self):\n tryexec = self.get('TryExec', strict=True)\n return which(tryexec)", "def is_exist(program):\n def is_exe(fpath):\n return path.isfile(fpath) and access(fpath, X_OK)\n\n fpath, _ = path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for mypath in environ[\"PATH\"].split(pathsep):\n exe_file = path.join(mypath, program)\n if is_exe(exe_file):\n return exe_file\n\n return None", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def find_snort():\n for path in os.environ[\"PATH\"].split(os.pathsep):\n filename = os.path.join(\"snort\")\n if os.path.exists(filename):\n return filename\n return None", "def find_tool():\n return shutil.which('dump')", "def bin_search(binary):\n if sys.platform == 'win32':\n # Directory containing 'binary' should be in PATH\n return binary\n result = None\n mode = os.R_OK | os.X_OK\n for p in bin_search_path:\n path = join(p, binary)\n if os.access(path, mode) == 1:\n result = path\n break\n else:\n raise MissingBinary('Unable to find binary \"%s\"' % binary)\n return result", "def is_program_installed(basename):\n for path in os.environ[\"PATH\"].split(os.pathsep):\n abspath = osp.join(path, basename)\n if osp.isfile(abspath):\n return abspath", "def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path", "def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")", "def find_software(software='samtools'):\n out = subprocess.run(['which {0}'.format(software)]\n , shell=True)\n if out.returncode != 0:\n return False\n\n return True", "def executable():\n return sys.executable", "def check_PATH_for_program(f):\n\n path = os.environ[\"PATH\"].split(\":\")\n\n for p in path:\n\n if os.path.isfile(os.path.join(p,f)):\n return True\n\n return False", "def _prog(shell_cmd):\n cmd = _which(shell_cmd)\n return os.path.basename(cmd) if cmd else None", "def open_program(path):\r\n os.startfile(path)", "def program_dir():\n if (Win32() and (hasattr(sys, 'frozen') or imp.is_frozen('__main__'))):\n # running from exe generated by py2exe\n return os.path.dirname(sys.executable)\n else:\n return sys.path[0]\n # return os.path.dirname(os.path.abspath(sys.argv[0]))", "def program_exists(name):\n for path in os.environ['PATH'].split(os.path.pathsep):\n if path and os.path.exists(os.path.join(path, name)):\n return True\n return False", "def which(exe):\n\n def wrapper(function):\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n if salt.utils.path.which(exe) is None:\n raise CommandNotFoundError(\n \"The '{}' binary was not found in $PATH.\".format(exe)\n )\n return function(*args, **kwargs)\n\n return wrapped\n\n return wrapper", "def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")", "def _executable(self) -> str:\n return sys.executable", "def _locate_bootloader():\n 
pkg_path = os.path.dirname(__file__)\n blpath = os.path.abspath(os.path.join(pkg_path, 'bootloader'))\n if not os.path.isfile(blpath):\n raise InternalError(\"bootloader not found at {}\".format(blpath))\n return blpath", "def find_executable(executable, path=None):\n import os, os.path, sys\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n if os.name == 'os2':\n (base, ext) = os.path.splitext(executable)\n # executable files on OS/2 can have an arbitrary extension, but\n # .exe is automatically appended if no dot is present in the name\n if not ext:\n executable = executable + \".exe\"\n elif sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(executable)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n execname = executable + ext\n if os.path.isfile(execname):\n return execname\n else:\n for p in paths:\n f = os.path.join(p, execname)\n if os.path.isfile(f):\n return f\n else:\n return None", "def available(self):\n\t\treturn self.executable(self.path)", "def available(self):\n\t\treturn self.executable(self.path)", "def available(self):\n\t\treturn self.executable(self.path)", "def _fast_get_system_executable(self):\n if self.real_prefix or (\n self.base_prefix is not None and self.base_prefix != self.prefix\n ): # if this is a virtual environment\n if self.real_prefix is None:\n base_executable = getattr(sys, \"_base_executable\", None) # some platforms may set this to help us\n if base_executable is not None: # noqa: SIM102 # use the saved system executable if present\n if sys.executable != base_executable: # we know we're in a virtual environment, cannot be us\n if os.path.exists(base_executable):\n return base_executable\n # Python may return \"python\" because it was invoked from the POSIX virtual environment\n # however some installs/distributions do not provide a version-less \"python\" binary in\n # the system install location (see PEP 394) so try to fallback to a versioned binary.\n #\n # Gate this to Python 3.11 as `sys._base_executable` path resolution is now relative to\n # the 'home' key from pyvenv.cfg which often points to the system install location.\n major, minor = self.version_info.major, self.version_info.minor\n if self.os == \"posix\" and (major, minor) >= (3, 11):\n # search relative to the directory of sys._base_executable\n base_dir = os.path.dirname(base_executable)\n for base_executable in [\n os.path.join(base_dir, exe) for exe in (f\"python{major}\", f\"python{major}.{minor}\")\n ]:\n if os.path.exists(base_executable):\n return base_executable\n return None # in this case we just can't tell easily without poking around FS and calling them, bail\n # if we're not in a virtual environment, this is already a system python, so return the original executable\n # note we must choose the original and not the pure executable as shim scripts might throw us off\n return self.original_executable", "def cmdGetPath(self, cmd, die=True):\n rc, out, err = self.prefab.core.run(\"which %s\" % cmd, die=False, showout=False, profile=True)\n if rc > 0:\n if die:\n raise j.exceptions.RuntimeError(\"Did not find command: %s\" % cmd)\n else:\n return False\n return out.split(\"\\n\")[-1]", "def CheckProg(context, prog_name):\n\n context.Message(\"Checking whether %s program exists...\" % prog_name)\n path = context.env.WhereIs(prog_name)\n context.Result(bool(path))\n\n return path", "def which_bin(cmd):\n cmd = [\"which\", cmd]\n try:\n 
return stderr_output(cmd).strip().split('\\n')[0]\n except CryptoritoError:\n return None", "def locate(path):\n if (test_is_on_hadoop()):\n # Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. However,\n # in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link.\n # Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins\n # machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST\n # be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/ are\n # smalldata and bigdata.\n p = os.path.realpath(os.path.join(\"/home/0xdiag/\",path))\n if not os.path.exists(p): raise ValueError(\"File not found: \" + path)\n return p\n else:\n tmp_dir = os.path.realpath(os.getcwd())\n possible_result = os.path.join(tmp_dir, path)\n while (True):\n if (os.path.exists(possible_result)):\n return possible_result\n\n next_tmp_dir = os.path.dirname(tmp_dir)\n if (next_tmp_dir == tmp_dir):\n raise ValueError(\"File not found: \" + path)\n\n tmp_dir = next_tmp_dir\n possible_result = os.path.join(tmp_dir, path)", "def _search_path_to_file(self, directory, binary_name):\n for root, dirs, files in os.walk(directory):\n if binary_name in files:\n return os.path.join(root, binary_name)\n raise micp_kernel.NoExecutableError", "def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError", "def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None", "def _compute_program_name():\n program_path = os.path.abspath(sys.argv[0])\n if os.path.exists(program_path):\n return os.path.basename(program_path)\n else:\n match = re.match(r\"^.*(?:\\.egg|\\.tar|\\.tar\\.gz)(?=/)\", program_path, re.IGNORECASE)\n if (match is not None) and os.path.exists(match.group(0)):\n # python script is embedded in egg\n return os.path.basename(program_path)\n else:\n return \"unknown\"" ]
[ "0.781772", "0.7618674", "0.74158335", "0.7389722", "0.7373021", "0.73094624", "0.7304917", "0.7303897", "0.7273199", "0.7246536", "0.7245675", "0.7232254", "0.72052497", "0.7189291", "0.7188439", "0.7188439", "0.7182894", "0.7165535", "0.7100432", "0.7094812", "0.6961689", "0.6958551", "0.6958551", "0.69398177", "0.69357026", "0.6921685", "0.69207394", "0.6888985", "0.6882886", "0.68822557", "0.682187", "0.6820966", "0.68179786", "0.68124074", "0.67763513", "0.6759703", "0.6744664", "0.67024964", "0.66493237", "0.664897", "0.66446716", "0.66320485", "0.663103", "0.6544635", "0.6504694", "0.64559186", "0.6448925", "0.64398104", "0.6432353", "0.64204156", "0.6400964", "0.6396262", "0.63466525", "0.633491", "0.6314656", "0.6306197", "0.630119", "0.62957907", "0.62823504", "0.62399185", "0.6238353", "0.62312895", "0.6199511", "0.6184493", "0.61829174", "0.6181791", "0.6156555", "0.61520076", "0.6147663", "0.6135373", "0.6090386", "0.607639", "0.6015136", "0.6004919", "0.6003354", "0.5994727", "0.5984696", "0.59823215", "0.5981221", "0.5979101", "0.59521085", "0.5924032", "0.591134", "0.59090865", "0.5895449", "0.5892731", "0.58906096", "0.5878847", "0.5876243", "0.5876243", "0.5876243", "0.58738875", "0.585965", "0.58588934", "0.5853625", "0.5845213", "0.5844167", "0.58177465", "0.581082", "0.5797547" ]
0.7299696
8
Try to get the path to pdb.py and return it in a list.
def GetPdbArgs(python): # Usually, python is /usr/bin/pythonxx and pdb is /usr/lib/pythonxx/pdb.py components = python.split('/') if len(components) >= 2: pdb_path = '/'.join(components[0:-2] + ['lib'] + components[-1:] + ['pdb.py']) if os.access(pdb_path, os.R_OK): return [pdb_path] # No pdb module found in the python path, default to -m pdb return ['-m', 'pdb']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def pdb_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"pdb\")", "def get_scripts_already_ran(pgconn):\n\n cursor = pgconn.cursor()\n\n cursor.execute(textwrap.dedent(\"\"\"\n select script_path\n from rtapebbletest_schema_version\n \"\"\"))\n\n # Return these as a set, so lookups are a little faster.\n return {row.script_path for row in cursor}", "def get_breakpoint_files(self):\r\n return self.bpoints.values(key='filename')", "def fetchPDB(pdb_id):\n url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_id.split('.')[0]\n return urllib.urlopen(url).read()", "def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts", "def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)", "def fetchPDB(name, path):\n from Bio.PDB import PDBList\n pdbname = os.path.join(path,name+'.pdb')\n pdbl = PDBList()\n filename = pdbl.retrieve_pdb_file(name,pdir=path)\n os.rename(filename, pdbname)\n return", "def ReadPDB (self, pdb_path, db_path):\n\n ReadPDBFile (pdb_path, db_path)\t#", "def process_pdb(self, pdb_filename) :\n args = [self.command, pdb_filename]\n try :\n p = Popen(args, stdout=PIPE)\n (out,err) = p.communicate() \n except OSError :\n raise RuntimeError(\"Cannot communicate with STRIDE.\") \n return out", "def load_dbc_files(dbc_paths):\n import can_decoder\n from pathlib import Path\n\n db_list = []\n for dbc in dbc_paths:\n db = can_decoder.load_dbc(Path(__file__).parent / dbc)\n db_list.append(db)\n\n return db_list", "def read_confs():\n debugger = 'import pdb; pdb.set_trace()\\n'\n ignore = []\n\n home = expanduser('~')\n pdberc = home + '/.pdberc'\n\n confs = {}\n\n if not exists(pdberc):\n return {\n 'debugger': debugger,\n 'ignore': ignore,\n }\n\n with open(pdberc, 'r') as file:\n content = [line.strip() for line in file.readlines()]\n\n for line in content:\n if '=' in line and line.strip()[0] != '#':\n key, value = line.split('=')\n confs[key] = value\n\n if 'debugger' in confs:\n if confs['debugger'] == 'ipdb':\n debugger = 'import ipdb; ipdb.set_trace()\\n'\n\n if 'ignore' in confs:\n ignore = confs['ignore'].split(',')\n\n result = {\n 'debugger': debugger,\n 'ignore': ignore,\n }\n\n return result", "def load_pdblist(pdblist, addext = 0):\n\n\t#Load the pdblist, and convert to a list.\n\tlistfile = open(pdblist, 'r')\n\tpdbs = listfile.readlines()\n\t\n\tfor pdb in pdbs:\n\t\tpdbname = pdb.strip()\n\t\tif (addext):\n\t\t\tpdbname = pdb.strip() + '.pdb'\n\t\t\n\t\tcmd.load(pdbname)", "def getPdbInfo(self) -> ghidra.app.util.bin.format.pdb.PdbInfoIface:\n ...", "def get_python_args(fname, python_args, interact, debug, end_args):\n p_args = []\n if python_args is not None:\n p_args += python_args.split()\n if interact:\n p_args.append('-i')\n if debug:\n p_args.extend(['-m', 'pdb'])\n if fname is not None:\n if os.name == 'nt' and debug:\n # When calling pdb on Windows, one has to 
replace backslashes by\n # slashes to avoid confusion with escape characters (otherwise, \n # for example, '\\t' will be interpreted as a tabulation):\n p_args.append(osp.normpath(fname).replace(os.sep, '/'))\n else:\n p_args.append(fname)\n if end_args:\n p_args.extend(shell_split(end_args))\n return p_args", "def get_db_path():\n \n return(db_run.db_abs_path)", "def read_banfile():\n # matches stuff like\n # \"/GLOW/*\"\n # and extracts the stuff between the quotes\n regex = re.compile(r'^\\s*[\"](/[^\"]+)[\"]\\s*(?:$|[#])')\n bans = []\n\n try:\n with open(BAN_MAPFILE, \"r\", encoding=\"latin-1\") as filehandle:\n for line in filehandle:\n match = regex.match(line)\n if not match:\n continue\n else:\n bans.append(match.group(1))\n except EnvironmentError as err:\n if err.errno == errno.ENOENT:\n logging.getLogger(__name__).warning(\"%s not found - all mappings might fail!\", BAN_MAPFILE)\n else:\n raise\n\n return bans", "def lookup_module(filename):\r\n\r\n # stolen from pdb\r\n import os\r\n import sys\r\n\r\n if os.path.isabs(filename) and os.path.exists(filename):\r\n return filename\r\n f = os.path.join(sys.path[0], filename)\r\n if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:\r\n return f\r\n root, ext = os.path.splitext(filename)\r\n if ext == '':\r\n filename = filename + '.py'\r\n if os.path.isabs(filename):\r\n return filename\r\n for dirname in sys.path:\r\n while os.path.islink(dirname):\r\n dirname = os.readlink(dirname)\r\n fullname = os.path.join(dirname, filename)\r\n if os.path.exists(fullname):\r\n return fullname\r\n return None", "def db_file():\n return abspath('vmchecker.db')", "def get_attached_database_list(self, file=False):\n if self.isMSSQL():\n return [] # pragma: no cover\n else:\n cur = self._connection.cursor()\n cur.execute(\"PRAGMA database_list;\")\n res = cur.fetchall()\n cur.close()\n res = [r for r in res if r[1] != \"temp\" and r[1] != \"main\"]\n if file:\n return [(r[1], r[2]) for r in res]\n else:\n return [r[1] for r in res]", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def open_pdb_file(dir_pdb: str, debug_mode: bool = False):\n # Get the current logger\n logger = logging.getLogger('root.utils.open_pdb')\n if debug_mode:\n logger.setLevel(10)\n try:\n # open PDB file (as an np-array):\n pdb = np.genfromtxt(dir_pdb, dtype=\"str\")\n\n except FileNotFoundError as e:\n logger.warning(\":: PDB file not found!\")\n logger.error(f\":: {e}\")\n logger.warning(\":: The process was interrupted!\")\n # If no pdb file was found we can't proceed with this node.\n return exit()\n logger.debug(\":: PDB file read complete!\")\n return pdb", "def module_path():\n from sys import path\n from os import getcwd\n from os.path import basename,exists\n from inspect import getmodulename,getfile\n from logging import 
warn\n # 'getfile' retreives the source file name name compiled into the .pyc file.\n pathname = getfile(lambda x: None)\n if exists(pathname): return pathname\n # The module might have been compiled on a different machine or in a\n # different directory.\n pathname = pathname.replace(\"\\\\\",\"/\")\n filename = basename(pathname)\n dirs = [dir for dir in [getcwd()]+path if exists(dir+\"/\"+filename)]\n if len(dirs) == 0: warn(\"pathname of file %r not found\" % filename)\n dir = dirs[0] if len(dirs) > 0 else \".\"\n pathname = dir+\"/\"+filename\n return pathname", "def find_backtrace(self):\n return [ft for ft in os.listdir(self.output_dir)\n if os.path.isfile(ft) and ft.startswith(\"Backtrace.\")]", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def get_database_path():\n\treturn _paths[_DATABASE_PATH_KEY]", "def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb", "def read_pdbs(directory, verbose=False):\n if verbose:\n print(\"Reading pdb input files from %s\" % directory)\n parser = PDBParser(PERMISSIVE=1, QUIET=True)\n if os.path.isdir(directory) and directory.endswith(\"/\"):\n try:\n pdbmodels = [parser.get_structure(\"Model_pair\", directory + f)[0] for\n f in listdir(directory) if f.endswith(\".pdb\")] # Generates pdb objects for files that end with .pdb\n except:\n sys.stderr.write(\"PDB files couldn't be opened. Please, revise that their format is correct.\")\n sys.exit(1)\n else:\n sys.stderr.write(\"Directory %s doesn't exists, please select a valid directory.\" % directory)\n sys.exit(1)\n if not bool(pdbmodels): # If no pdb instance is generated\n sys.stderr.write(\"No pdb files where read. Please make sure the given directory contains pdb files. \")\n sys.exit(1)\n for model in pdbmodels:\n if len(model.child_list) != 2:\n sys.stderr.write(\"A pdb input file doesn't contains two chains. 
Please, all input pdbs must only contain \"\n \"two chains.\")\n sys.exit(1)\n if verbose:\n print(\"Pdb objects stored\")\n return pdbmodels", "def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')", "def get_python_path():\n\n return get_executable_path('python')", "def debug_filename(pe):\n if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):\n for i in pe.DIRECTORY_ENTRY_DEBUG:\n if hasattr(i.entry, 'PdbFileName'):\n return i.entry.PdbFileName.decode('utf-8', 'ignore')\n return None", "def getMHCIPDBFpBin(self):\n fn = \"mhcI_pdbs.bin\"\n return self.joinPath(self.mhcIdb_pdb_path, fn)", "def pathext_list():\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)", "def db(filename = 'P51-11'):\n import pdb\n sys.argv[1:] = ['-v', filename]\n pdb.run('extract.main()')", "def getImports(pth):\n if is_win or is_cygwin:\n if pth.lower().endswith(\".manifest\"):\n return []\n try:\n return _getImports_pe(pth)\n except Exception as exception:\n # Assemblies can pull in files which aren't necessarily PE,\n # but are still needed by the assembly. Any additional binary\n # dependencies should already have been handled by\n # selectAssemblies in that case, so just warn, return an empty\n # list and continue.\n # For less specific errors also log the traceback.\n logger.warning('Can not get binary dependencies for file: %s', pth)\n logger.warning(\n ' Reason: %s', exception,\n exc_info=not isinstance(exception, pefile.PEFormatError))\n return []\n elif is_darwin:\n return _getImports_macholib(pth)\n else:\n return _getImports_ldd(pth)", "def breakpoint_find(self, addr):\n return self._dll.JLINKARM_FindBP(addr)", "def _get_fuzzdb_path() -> Path:\n global BASE_PATH\n # Once BASE_PATH is set we do not want to change it so this is a no-op.\n if BASE_PATH:\n return BASE_PATH\n package_name = \"protofuzz\"\n module_name = \"fuzzdb\"\n search_paths: List[Path] = []\n fuzzdb_path: Optional[Path] = None\n # We prioritize checking the env variable over the project recursive\n # copy of fuzzdb as the env being set implies the user wants that\n # location.\n if BASE_PATH_ENVIRONMENT_VAR in os.environ:\n search_paths.append(Path(os.environ[BASE_PATH_ENVIRONMENT_VAR]))\n # We convert this to a Path as it will be easier to traverse in other\n # methods, Path only accepts strings/bytes\n module_path = Path(\n str(importlib.resources.files(package_name).joinpath(module_name))\n )\n search_paths.append(module_path)\n for module_path in search_paths:\n attack_path = module_path / Path(\"attack\")\n # Use the 1st directory we find that exists and seems like a fuzzdb dir\n if os.path.exists(attack_path):\n fuzzdb_path = attack_path\n break\n if not fuzzdb_path:\n raise RuntimeError(\"Could not import fuzzdb dependency files.\")\n BASE_PATH = fuzzdb_path\n return fuzzdb_path", "def read_pdb(filename):\n \n # Read the PDB file into memory.\n pdbfile = open(filename, 'r')\n\n # Extract the ATOM entries.\n # Format described here: http://bmerc-www.bu.edu/needle-doc/latest/atom-format.html\n atoms = list()\n for line in pdbfile:\n if line[0:6] == \"ATOM \":\n # Parse line into fields.\n atom = dict()\n atom[\"serial\"] = line[6:11]\n atom[\"atom\"] = line[12:16]\n atom[\"altLoc\"] = line[16:17]\n atom[\"resName\"] = line[17:20]\n atom[\"chainID\"] = line[21:22]\n atom[\"Seqno\"] = line[22:26]\n atom[\"iCode\"] = line[26:27]\n atom[\"x\"] = line[30:38]\n atom[\"y\"] = line[38:46]\n 
atom[\"z\"] = line[46:54]\n atom[\"occupancy\"] = line[54:60]\n atom[\"tempFactor\"] = line[60:66]\n atoms.append(atom)\n \n # Close PDB file.\n pdbfile.close()\n\n # Return dictionary of present residues.\n return atoms", "def _get_source(self, fullmodname):\n submodname, is_package, relpath = self._get_info(fullmodname)\n fullpath = self.path_entry + relpath\n source = self.datablocks[relpath]\n if hasattr(source, \"decode\"):\n source = source.decode(\"UTF-8\")\n source = source.replace('\\r\\n', '\\n')\n source = source.replace('\\r', '\\n')\n return submodname, is_package, fullpath, source", "def getImportList():\n\timports = []\n\tfor line in vim.current.buffer:\n\t\twords = string.split(line)\n\t\tif (len(words)>0 and (words[0]=='import' or words[0]=='from')):\n\t\t\tif words[1] not in imports:\n\t\t\t\timports.append(words[1])\n\treturn imports", "def get_path_db():\n\taiqc_config = get_config()\n\tif aiqc_config is None:\n\t\t# get_config() will print a null condition.\n\t\tpass\n\telse:\n\t\tdb_path = aiqc_config['db_path']\n\t\treturn db_path", "def test_load_pdb():\n\n itraj = os.path.join(path, \"benzene.pdb\")\n\n traj = tools.load_traj(itraj)\n\n assert traj.n_frames == 1\n assert traj.n_atoms == 12\n\n C = pt.select_atoms(\"@C\", traj.top)\n assert len(C) == 6\n\n H = pt.select_atoms(\"@H\", traj.top)\n assert len(H) == 6", "def get_crash_dumps_path(self):\n\t\treturn call_sdk_function('PrlApi_GetCrashDumpsPath')", "def _getImports_pe(pth):\n dlls = set()\n # By default library pefile parses all PE information.\n # We are only interested in the list of dependent dlls.\n # Performance is improved by reading only needed information.\n # https://code.google.com/p/pefile/wiki/UsageExamples\n\n pe = pefile.PE(pth, fast_load=True)\n pe.parse_data_directories(directories=[\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],\n pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],\n ],\n forwarded_exports_only=True,\n import_dllnames_only=True,\n )\n\n # Some libraries have no other binary dependencies. Use empty list\n # in that case. Otherwise pefile would return None.\n # e.g. C:\\windows\\system32\\kernel32.dll on Wine\n for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):\n dll_str = winutils.convert_dll_name_to_str(entry.dll)\n dlls.add(dll_str)\n\n # We must also read the exports table to find forwarded symbols:\n # http://blogs.msdn.com/b/oldnewthing/archive/2006/07/19/671238.aspx\n exportSymbols = getattr(pe, 'DIRECTORY_ENTRY_EXPORT', None)\n if exportSymbols:\n for sym in exportSymbols.symbols:\n if sym.forwarder is not None:\n # sym.forwarder is a bytes object. 
Convert it to a string.\n forwarder = winutils.convert_dll_name_to_str(sym.forwarder)\n # sym.forwarder is for example 'KERNEL32.EnterCriticalSection'\n dll = forwarder.split('.')[0]\n dlls.add(dll + \".dll\")\n\n pe.close()\n return dlls", "def get_hookscript_path ( self ):\n return self.hook_script_fspath", "def binpath(self):\n return self._query_config()['binpath']", "def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path", "def listPaths():\n try:\n paths = [x[1] for x in parseFstab(FSTAB)]\n return paths\n except DMException:\n return []", "def _get_pex_paths(self) -> list:\r\n pex_paths: list = []\r\n\r\n for object_name, script_path in self.psc_paths.items():\r\n pex_path = os.path.join(self.options.output_path, object_name.replace('.psc', '.pex'))\r\n\r\n # do not check if file exists, we do that in _find_missing_script_paths for a different reason\r\n if pex_path not in pex_paths:\r\n pex_paths.append(pex_path)\r\n\r\n return pex_paths", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def scan_path(executable=\"mongod\"):\n for path in os.environ.get(\"PATH\", \"\").split(\":\"):\n path = os.path.abspath(path)\n executable_path = os.path.join(path, executable)\n if os.path.exists(executable_path):\n return executable_path", "def load_source(self, path, alias = None):\n\t\ttry:\n\t\t\treturn list(open(path, \"r\"))\n\t\texcept OSError:\n\t\t\treturn None", "def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))", "def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()", "def syspath():\n import sys\n pprint(sys.path)", "def retrievePdb(name, guid):\r\n symbolservers = ['http://symbols.hacst.net/', 'http://mumble.info:8080/symbols/']\r\n \r\n for symbolserver in symbolservers:\r\n if retrievePdbFrom(name, guid, symbolserver):\r\n return True\r\n \r\n return False", "def download_pdb(pdb, pdb_path=None):\n if pdb_path is None:\n pdb_path = PDB_PATH\n \n path_uniprot = Path(pdb_path)\n name = pdb + \".pdb\"\n url = \"https://files.rcsb.org/download/\" + pdb + \".pdb\"\n full_name = path_uniprot / name\n # Check if .pdb is already downloaded, if not => download\n if not full_name.is_file():\n download_url(url, str(path_uniprot), name)", "def get_breakpoints(self, filename=None, lineno=None):\r\n if filename is None:\r\n return self.bpoints.items()\r\n if lineno is None:\r\n return self.bpoints.filter( keys=('filename',),\r\n values=(filename,) )\r\n else:\r\n return self.bpoints.filter( keys=('filename','lineno'), \r\n values=(filename, lineno) )", "def find_modules(x):\n return Path(x).rglob('*.py')", "def load_pdb(self):\n # 'atoms': list with the residue id for every atom.\n pdb = self.pdb\n for l_i in range(len(pdb)): \n dat = bio_lib.get_labels(pdb[l_i])\n res_atm = dat[0]\n res_nam = dat[1]\n res_ind = dat[2]\n res_chn = dat[3]\n self.identifiers.append([res_nam, res_ind, res_chn]) \n #x_i = dat[4]\n #y_i = dat[5]\n #z_i = dat[6]\n # Adjusted coordinates returned from PDB are not strictly formatted.\n if len(pdb[l_i]) > 10:\n x_i = pdb[l_i][31:].split()[0]\n y_i = pdb[l_i][31:].split()[1]\n z_i = pdb[l_i][31:].split()[2]\n c_i = \" \".join([res_atm, x_i, y_i, z_i])\n 
self.res_atm_xyz.append(c_i)", "def _get_pyfilelist(srcpath, usegitignore=True) -> list:\n gitignorefile = srcpath / Path(\".gitignore\")\n if usegitignore and gitignorefile.exists():\n with gitignorefile.open('r') as f:\n lines = f.read().splitlines()\n gitignore = [\n srcpath / Path(line)\n for line in lines\n if not line.strip().startswith(\"#\")\n and len(line.strip()) > 1\n and Path(line).suffix == \"\"\n ] + [srcpath / Path(\".git\")]\n viablepaths = [\n p for p in srcpath.glob(\"*/\") if p.is_dir() and p not in gitignore\n ]\n filelist = set().union(*[set(p.glob(\"**/*.py\")) for p in viablepaths])\n filelist = filelist.union(*[set(srcpath.glob('*.py'))])\n else:\n filelist = srcpath.glob(\"**/*.py\")\n return [p.relative_to(srcpath) for p in filelist]", "def get_db_path():\n return os.path.join(sys.path[0], \"my_db.db\")", "def files(cls, pdb_object):\n return [pdb_object.flags_relax,\n pdb_object.constraints,\n pdb_object.minimized.hidden_complexes,\n pdb_object.minimized.complex.pdb,\n pdb_object.minimized.scores]", "def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)", "def get_pdb_content_instance(pdb_file):\n pdb_content = PDBContent(pdb_file)\n return pdb_content", "def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None", "def _get_sideboard_plugin_where_import_originated():\n for module_name, filename in _yield_module_names_and_filenames_from_callstack():\n potential_plugin_name = module_name.split('.')[0]\n if _is_plugin_name(potential_plugin_name):\n return potential_plugin_name\n\n potential_plugin_name = _venv_plugin_name(filename)\n if _is_plugin_name(potential_plugin_name):\n return potential_plugin_name\n\n return None", "def read_bplist(path: str) -> dict:\n return _read_plist(path, plistlib.FMT_BINARY)", "def get_package_list_filepath(*args, **kwargs):\n logger.debug(\"Getting the package file filepath\")\n user = path.expanduser(\"~\")\n filepath = f\"{user}/.gitget.yaml\"\n logger.debug(\"Filepath found\")\n return filepath", "def get_source_paths():\r\n script_paths = set()\r\n try:\r\n script_paths.update(filter(None, os.environ.get(PYENV).split(os.pathsep)))\r\n script_paths.update(filter(None, os.environ.get(MELENV).split(os.pathsep)))\r\n except AttributeError:\r\n logger.debug('No custom environ variables set.')\r\n\r\n cwd = os.path.dirname(os.path.abspath(__file__))\r\n for each in os.listdir(cwd):\r\n path = os.path.join(cwd, each)\r\n if not os.path.isdir(path) or each.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n script_paths.add(path)\r\n\r\n return script_paths", "def get_db_path_from_config():\n if not globalConf.sections():\n raise OSError(\"Cannot read config file.\")\n databases = parse_databases()\n db_path = None\n for db in databases:\n if db[0] == LocalDB:\n if db_path is None:\n db_path = db[1]\n else:\n raise ValueError(\"Multiple local database files are listed \"\n \"in the config file.\")\n if db_path is None:\n raise ValueError(\"No local database file is listed in the config file.\")\n return db_path", "def find_source(self, path):\n\t\ttry:\n\t\t\treturn self.sources[path]\n\t\texcept KeyError:\n\t\t\tlines = None\n\t\t\tif os.path.isabs(path):\n\t\t\t\tlines = self.load_source(path)\n\t\t\telse:\n\t\t\t\tfor p in 
self.paths:\n\t\t\t\t\tp = os.path.join(p, path)\n\t\t\t\t\tlines = self.load_source(p, path)\n\t\t\t\t\tif lines != None:\n\t\t\t\t\t\tself.sources[p] = lines\n\t\t\t\t\t\tbreak\n\t\t\tself.sources[path] = lines\n\t\t\treturn lines", "def getOMFSrcModuleFiles(self) -> List[ghidra.app.util.bin.format.pe.debug.OMFSrcModuleFile]:\n ...", "def list():\n\n return cache.codeTableList()", "def binpath(self):\n return self.__bin", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def get_rpaths(dylib_path):\n load_cmds = get_load_commands(dylib_path)\n rpath_cmds = filter(lambda cmd: cmd.name == 'LC_RPATH', load_cmds)\n path_re = re.compile('path (?P<rpath>.*) \\(.*\\)')\n\n rpaths = []\n for cmd in rpath_cmds:\n for line in cmd.lines:\n match = path_re.search(line)\n if match:\n rpaths.append(match.group('rpath'))\n return rpaths", "def get_possible_paths():\n yield ('mtad', get_mtad_linter_path())\n yield ('bundled', get_bundled_linter_path())", "def path(self):\n path = self.bidirectional_cpp.getPath()\n # format as list on return as SWIG returns \"tuple\"\n if len(path) <= 0:\n return None\n\n _path = []\n # Convert path to its original types and return\n for p in path:\n if p in [\"Source\", \"Sink\"]:\n _path.append(p)\n else:\n if \"int\" in self._original_node_type.__name__:\n _path.append(int(p))\n elif \"str\" in self._original_node_type.__name__:\n _path.append(str(p))\n return _path", "def _find_gitmodules(p4, stream_name):\n parent = p4gf_util.first_dict(p4.run('stream', '-ov', stream_name))\n for line in parent['View']:\n if '.gitmodules' in line:\n # return everything up to the ' .gitmodules' at the end of the line\n return line[:-12]\n return None", "def get_imports(inputfile) -> list:\n with inputfile.open('rb') as f:\n lines = f.read().decode(errors='replace').splitlines()\n\n lines = [line for line in lines if len(line.strip()) > 0]\n importlist = [\n iline.split()[1]\n for iline in [\n line.strip()\n for line in lines\n if (line.strip().split()[0] == \"import\")\n or (line.strip().split()[0] == \"from\")\n ]\n if len(iline.split()) > 1\n ]\n return importlist", "def get_in_pdb_path(cfg, coord_id):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"pdb\" + MAX_OC_PDB_SUFFIX,\n coord_id + \".pdb\")", "def loadBlockchain(path):\r\n list = []\r\n filename = Blockchain(0, '0', 0, 0, 0).getFilename()\r\n f = open(path)\r\n for line in f:\r\n if line == '\\n':\r\n continue\r\n linesplit = line[:-1].split(';')\r\n b = Blockchain(int(linesplit[0]), linesplit[1], int(linesplit[2]), int(linesplit[3]) ,linesplit[4])\r\n list.append(b)\r\n f.close()\r\n return list", "def _get_package_data() -> list[list[str]]:\n moddata = []\n modlist: tuple[str, ...] 
= (\n \"click\",\n \"cryptography\",\n \"globus_cli\",\n \"globus_sdk\",\n \"jmespath\",\n \"requests\",\n )\n if verbosity() < 2:\n modlist = (\"globus_cli\", \"globus_sdk\", \"requests\")\n\n for mod in modlist:\n cur = [mod]\n try:\n loaded_mod = __import__(mod)\n except ImportError:\n loaded_mod = None\n\n for attr in (\"__version__\", \"__file__\", \"__path__\"):\n # if loading failed, be sure to pad with error messages\n if loaded_mod is None:\n cur.append(\"[import failed]\")\n continue\n\n try:\n attrval = getattr(loaded_mod, attr)\n except AttributeError:\n attrval = \"\"\n cur.append(attrval)\n moddata.append(cur)\n\n return moddata", "def scriptpath(self, code) -> str:\n return ''", "def does_line_contains_import_pdb(line: str) -> bool:\n return IMPORT_PDB_LINE.strip().split() == line.split()", "def get_code():\n return inspect.getsource(sort)", "def resolve_test_progs(sconscript_filename):\n\treprg = re.compile(r\"\"\"^env.Program\\([\"'](.*?)['\"]\"\"\")\n\tprogs = []\n\tfor line in open(sconscript_filename):\n\t\tm = reprg.match(line.strip())\n\t\tif m:\n\t\t\tprogs.append(m.group(1))\n\treturn progs", "def getSequencefromPDB(pdbfile, chain='C', index=0):\n parser = PDB.PDBParser(QUIET=True)\n struct = parser.get_structure(pdbfile,pdbfile)\n ppb = PDB.PPBuilder()\n model = struct[0]\n peptides = ppb.build_peptides(model[chain])\n seq=''\n for i,pep in enumerate(peptides):\n seq+=str(pep.get_sequence())\n return seq", "def source(self):\n return some.dap.source(py.path.local(self.co_filename))", "def get_bundled_schema_path():\n return str(data.load_resource(\"schema\"))", "def to_pdb_file(self, pdb_path: str) -> None:\n with open(pdb_path, \"w\", encoding=\"utf-8\") as pdb_file:\n pdb_file.write(self.to_pdb_block())", "def detect_chains(input_pdb):\n opened_pdb = open(input_pdb, \"r\").readlines()\n chains = []\n for value in opened_pdb[1:]:\n try:\n if value[21] not in chains:\n chains.append(value[21])\n except:\n continue\n return chains", "def get_parts(dbpath):\n odb = openOdb(path=dbpath)\n _parts = []\n for _name,_prt in odb.parts.items():\n _type = _prt.type\n _parts.append((_name,_type))\n return _parts", "def cfgPathToList( arg ):\n from types import StringTypes\n listPath = []\n if type( arg ) not in StringTypes:\n return listPath\n while arg.find( '/' ) == 0:\n arg = arg[1:]\n return arg.split( '/' )", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def getBaseSrcFile(self) -> List[int]:\n ..." ]
[ "0.7001057", "0.6927535", "0.6694564", "0.5969401", "0.5900031", "0.58544034", "0.5816103", "0.58008647", "0.58002234", "0.57855517", "0.57504606", "0.5737899", "0.5679186", "0.5636797", "0.55898374", "0.5576825", "0.54973745", "0.53993475", "0.53957206", "0.53377414", "0.5327768", "0.53255343", "0.5325156", "0.5315229", "0.53067875", "0.5305509", "0.5305509", "0.5298336", "0.52901244", "0.52893674", "0.5283258", "0.5273122", "0.5272657", "0.52708834", "0.52597684", "0.5244227", "0.5238374", "0.52238065", "0.5207328", "0.5180643", "0.5145235", "0.51423573", "0.5135963", "0.5135514", "0.51279664", "0.5125572", "0.5118933", "0.5118097", "0.51155", "0.5097132", "0.50887895", "0.5073447", "0.5061569", "0.50499725", "0.5048194", "0.5039501", "0.5036012", "0.5035587", "0.50328624", "0.50312555", "0.50240004", "0.50221825", "0.5019939", "0.5013896", "0.5003576", "0.5002881", "0.5001166", "0.49967423", "0.4996655", "0.49870777", "0.49865952", "0.49808764", "0.49782845", "0.49749008", "0.49728975", "0.4969511", "0.49686366", "0.4959479", "0.49579576", "0.4949135", "0.4944925", "0.4942625", "0.4938072", "0.4937465", "0.49351248", "0.4928241", "0.49262565", "0.49184752", "0.49132144", "0.49071005", "0.49037576", "0.49035022", "0.49003115", "0.48940745", "0.48899758", "0.4885635", "0.48851764", "0.48805198", "0.48787287", "0.48699334" ]
0.72876877
0
Print usage for the stub script.
def PrintOurUsage(): print 'Stub script %s (auto-generated). Options:' % sys.argv[0] print ('--helpstub ' 'Show help for stub script.') print ('--debug_binary ' 'Run python under debugger specified by --debugger.') print ('--debugger=<debugger> ' "Debugger for --debug_binary. Default: 'gdb --args'.") print ('--debug_script ' 'Run wrapped script with python debugger module (pdb).') print ('--show_command_and_exit ' 'Print command which would be executed and exit.') print ('These options must appear first in the command line, all others will ' 'be passed to the wrapped script.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def usage():", "def usage():", "def print_usage():\n print(helptxt)\n sys.exit(2)", "def usage():\n pass", "def usage() :\n\n print usage.__doc__", "def usage():\n print(__doc__.strip())", "def display_usage():\n print >> sys.stderr, __doc__", "def print_usage():\r\n print(\"USAGE: python[3] pso.py [<seed>] [<filename>]\")\r\n print(\" where:\")\r\n print(\"\\t[<seed>]\\tOPTIONAL - seed for the random generator\")\r\n print(\"\\t[<filename>]\\tOPTIONAL - name for the output file\")", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def usage():\n print(\"Hello World!\")", "def usage():\n print(\"\"\"Usage:\n\\t%s storer - initialize storer machine\n\\t%s tester - initialize tester machine\n\\t%s --help - print this message\"\"\"% (sys.argv[0], sys.argv[0], sys.argv[0]))", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def print_usage():\r\n\tprint(\"Usage: python gddownloader.py arg\")\r\n\tprint(\"where arg can be one of the following:\")\r\n\tprint(\" github url (e.g. https://github.com/user/repo)\")\r\n\tprint(\" path to txt file containing github urls\")", "def showUsage():\n None", "def print_usage():\r\n print \"\"\r\n print \"\"\r\n print \" JoomFind v0.1\"\r\n print \"\"\r\n print \" Script made by Jasdev Singh\"\r\n print \"\"\r\n print \" This script is made only for educational and offline self-testing \"\r\n print \" purposes. The creator is not responsible or accountable for any \"\r\n print \" damage or loss caused that you perform with this script. \"\r\n print \"\"\r\n print \" Usage example:\"\r\n print '\\tpython joomfind.py -f filepath | -v'\r\n print \"\"\r\n print \" Put URL(s) to scan in a newline delimited file\"\r\n print \" URL(s) must point to homepage of the CMS \"\r\n print \"\"\r\n print \" Options:\"\r\n print \" -f filename (specify input file)\"\r\n print \" -v, --verbose (show detailed output)\"\r\n print \" --help (displays this help text)\"\r\n print \"\"\r\n return", "def print_usage(msg):\n print('Usage: ' + msg)", "def usage():\n return _usage", "def usage():\n print \"\"\n print \"Usage: python parabot.py [options] <testsuite.tsv>\"\n print \"\"\n print \"<testsuite.tsv> can be absolute or relative path + filename of a testsuite.\"\n print \"The containing folder will be used as working directory\"\n print \"\"\n print \"Options:\"\n print \"-h\\t--help\\t\\tThis screen\"\n print \"-i\\t--include\\tInclude a tag\"\n print \"-e\\t--exclude\\tExclude a tag\"\n print \"-f\\t--forceserial\\tForces serial test execution\"\n print \"-b\\t--basedir\\tSet parabots base dir\"\n print \"\"", "def print_usage(self):\n print((\"@brief Usage is not defined for command \" + self.command))", "def printhelp():", "def usage():\n program_name = sys.argv[PROGRAM_ARG_NUM]\n print(\"Usage:\")\n print(\"%s IP LOGFILE [PORT]\" % program_name)\n print(\" IP : IP address of host running the desired FTP Server.\")\n print(\" LOGFILE : Name of file containing FTP Client log details.\")\n print(\" PORT (optional) : Port used to connect to FTP Server. 
Default is\"\\\n \" 21.\")", "def printUsage():\n print 'Usage: wue2stein.py nodeFile edgeFile steinFile'", "def printCLIHelp():\n \n cmd = os.path.basename(sys.argv[0])\n print \"\"\"\n - quickCurve - \n\nPerform a liklihood analysis on Fermi LAT data. You can use the\ncommand line functions listed below or run this module from within\npython. For full documentation on this module execute 'pydoc\nquickCurve'.\n \n%s (-h|--help) ... This help text.\n \n%s (-i|--initialize) ... Generate a default config file called\n example.cfg. Edit this file and rename it <basename>.cfg for use\n in the quickLike module.\n\n%s (-a|--analyze) (-n |--basename=)<basename> ... Perform an analysis\n on <basename>. <basename> is the prefix used for this analysis.\n You must already have a configuration file if using the command\n line interface.\n\n\"\"\" %(cmd,cmd,cmd)", "def print_usage(retcode=None):\n\n print(USAGE)\n\n if retcode is not None:\n sys.exit(retcode)", "def usage():\n print 'cpp file Generator.'\n print 'Usage:'\n print ' -h: help'\n print ' -i: file'\n print ' --help: help'\n print ' --in: help'\n sys.exit(1)", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def print_usage():\n print(\"usage: MILP.py -p <parameter file> -i <payoff file> -o <output file>\")\n print(\"-p, --params\\t sets the parameter file\")\n print(\"-i, --payoff\\t sets the payoff file\")\n print(\"-o, --output\\t sets the output file. Defaults to out.csv\")\n print(\"-d, --delimiter\\t sets the delimiter of ALL files. Defaults to csv\")", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def usage():\n\n print(\"\\nHere is how you can use this script\\n\")\n print(\"Usage: python %s\"%sys.argv[0])\n print(\"\\t --input=<file>\")", "def usage(rc, errmsg=\"\"):\n#-------------------------------------------------------------------------------\n print >> oss.stderr, __doc__\n if errmsg:\n print >> oss.stderr, \"\\nError:\\n\" + str(errmsg)\n oss.exit(rc)", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. 
Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def help():", "def _usage():\n txt = \\\n f\"[INFO] Usage: {sys.argv[0]} lis_config_template restart_dir YYYYMMDD\"\n print(txt)\n print(\"[INFO] where: \")\n print(\"[INFO] lis_config_template: Path to sample lis.config file.\")\n print(\"[INFO] restart_dir: Path to LIS restart files.\")\n print(\"[INFO] YYYYMMDD: year/month/day of start of next LIS run.\")", "def print_usage(prog: str, prm: dict):\n\n\tprint()\n\tprint('Usage:')\n\tprint()\n\tprint(f'{prog} svc_name ' + ' '.join(['[%s=x]'%(x) for x in prm]))\n\tprint()\n\tprint('Default values for optional parameters:')\n\tprint()\n\tfor k,v in prm.items(): print(f'{k} : {v}')\n\tprint()\n\tsys.exit(-1)", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. 
If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def usage():\n with open(USAGE, 'r') as f:\n for line in f:\n print(line)", "def usage():\n print(\"This program runs with command line arguments.\\n\"\n \"Available parameters:\\n\"\n \"\\t-h --help : help\\n\"\n \"\\t-f : file name or path\\n\"\n \"\\t-k : key file\\n\"\n \"\\t-o : operaion\\n\"\n \"\\n\"\n \"There are 3 operations available:\\n\"\n \"\\t'1' --> add_sign() : adds hash to end of file\\n\"\n \"\\t'2' --> check() : checks if added hash and current hash are matched\\n\"\n \"\\t'3' --> remove_sign() : remove hash from end of file which has added with operion 1\\n\"\n \"\\n\"\n \"Example command: $python3 checker.py -f message.pdf -k key_file.txt -o 1\")", "def printUsage(launchScriptName):\n print(\"\")\n print(\"usage:\")\n print(\"------\")\n print(\"\")\n print(launchScriptName)\n print(\"\\t[ -i | -interpreter | -c | -cmdprompt | -bg | -background ]\")\n print(\"\\t[ -n | -nogui ] [ -p <port> | -port <port> ]\")\n print(\"\\t[ -l <logfile> | -logfile <logfile> ] [ -h | -help ]\")\n print(\"\")", "def do_usage():\r\n progname = sys.argv[0]\r\n if string.find(progname, os.sep) > -1:\r\n progname = string.split(sys.argv[0], os.sep)[-1]\r\n elif string.find(progname, os.altsep) > -1:\r\n progname = string.split(sys.argv[0], os.altsep)[-1]\r\n print \"\"\"\r\n%s (version: %s)\r\n\r\nUSAGE:\r\n\r\n \"-h\", \"-?\", \"help\", \"--help\", \"h\":\r\n prints this help message\r\n \r\n \"-v\", \"--verbose\":\r\n turns on verbose mode (prints useful messages about what the program is doing)\r\n\r\n \"-c\", \"--clean\":\r\n deletes all files from the temporary directory\r\n\r\n \"-ns\", \"--nostain\": \r\n don't use the stains (non-standard tinctures) of\r\n Murrey (dark reddish purple), Sanguine (blood red) and Tenné (orange) \r\n\r\n \"-s\", \"--silent\": \r\n don't produced any output messages at all \r\n\r\n n:(a number), n=(a number)\r\n sets the number of badges to create\r\n \r\n\"\"\" % (progname, __VERSION__)\r\n sys.exit()", "def usage(code, msg=''):\n if msg:\n print(msg, file=sys.stderr)\n print(file=sys.stderr)\n print(__doc__ % globals(), file=sys.stderr)\n sys.exit(code)", "def usage(code, msg=''):\n if msg:\n print(msg, file=sys.stderr)\n print(file=sys.stderr)\n print(__doc__ % globals(), file=sys.stderr)\n sys.exit(code)", "def printUsage(self):\n print >> sys.stderr\n print >> sys.stderr, \"usage \", self.programName+\":\"\n for pname, pvalue in self.neededParamsNames.items():\n print >> sys.stderr, \" \", pname, pvalue[3]\n for pname, pvalue in self.optionalParamsNames.items():\n print >> sys.stderr, \" [\",pname, pvalue[3], \"]\"", "def print_usage():\n print 'USAGE: %s [options]' % os.path.abspath(__file__)\n print 'EXAMPLE1: %s # FOR DEFAULTS' % os.path.abspath(__file__)\n print 'EXAMPLE2: %s 121f03=tweek hirap=towelie details=False # TWO SMALL SETS' % os.path.abspath(__file__)\n print 'EXAMPLE3: %s 121f03=tweek details=True # ONE DETAILED SET' % os.path.abspath(__file__)\n print 'EXAMPLE4: %s details=True # SHOWS MAX INFO' % os.path.abspath(__file__)", "def usage():\n\tprint()\n\tprint(\n\t\t'\\t Usage: python conos_aicuu_client.py [-h] [--help] <ENDPOINT> <ENVIRONMENT> <CLIENT_ID> <CLIENT_SECRET> <INPUT_FILE> <OUTPUT_FILE> <NUMBER_THREAD>')\n\tprint('\\t -h : help')\n\tprint('\\t ENDPOINT : 1: /v1.0/person')\n\tprint('\\t 2: /v1.0/company')\n\tprint('\\t ENVIRONMENT : must be one of: dev, test, int, prod')\n\tprint('\\t CLIENT_ID : used for obtaining access 
token')\n\tprint('\\t CLIENT_SECRET : used for obtaining access token')\n\tprint('\\t INPUT_FILE : location of the request input file')\n\tprint('\\t OUTPUT_FILE : location of the output file (analyze report)')\n\tprint('\\t NUMBER_THREAD : (optional) number of threads to process data (from 1 -> 8, default is 1)')\n\tprint('\\n\\t Example : python conos_aicuu_client.py 1 test Admin 123456 data/person/input.txt data/person/output.txt 5')\n\texit(2)", "def Usage():\r\n print \"Correct Usage:\"\r\n print \"python primesBelow.py <integer>\"", "def test_cli_help(run):\n msg = []\n _, err = run(dork.cli.the_predork_cli, msg, *(\"\", \"-h\"))\n assert \"usage: \" in msg[0], \\\n \"Failed to run the cli.main method: {err}\".format(err=err)", "def _usage():\n txt = f\"[INFO] Usage: {sys.argv[0]}\"\n txt += \" configfile topdatadir YYYYMM model_forcing\"\n print(txt)\n print(\"[INFO] where:\")\n print(\"[INFO] configfile is path to LDT parameter file\")\n print(\"[INFO] topdatadir is top-level directory for LIS data\")\n print(\"[INFO] YYYYMM is month to process\")\n print(\"[INFO] model_forcing is ID for atmospheric forcing for LIS\")", "def usage(arg):\r\n\r\n logging.debug('usage({})'.format(arg))\r\n\r\n myDocstring = \"\"\r\n if arg in [\"-i\", \"--info\"]:\r\n myDocstring = myDocstring+\"\\n\"+__doc__\r\n myDocstring = myDocstring+\"\\n AUTHOR\\n\\n $author$\\n\"\r\n myDocstring = myDocstring+\"\\n LICENSE\\n\\n $license$\\n\"\r\n parser.print_help()\r\n elif arg in [\"-v\", \"-i\", \"--version\", \"--info\"]:\r\n myDocstring = myDocstring+\"\\n VERSION\\n\\n $version$\\n\" \r\n myDocstring = myDocstring.replace('$version$', __version__)\r\n myDocstring = myDocstring.replace('$author$',__author__)\r\n myDocstring = myDocstring.replace('$license$',__license__)\r\n print(myDocstring)", "def help(self, dummy):\r\n help = self.doc + \"\\n\"\r\n if help.find(\"%s\") > 0:\r\n help = help.replace(\"%s\", self.progname)\r\n print_function(help, end='', file=self.stdout)\r\n self.exit(0)", "def usage():\n print(\"usage: %s <N bound> <S bound> <W bound> <E bound> \" %\n (sys.argv[0]) +\n \"<fault file> <station file> <map prefix> <topo> <coastal> <border>\")", "def usage(rc, errmsg=\"\"):\n#-------------------------------------------------------------------------------\n print >> oss.stderr, __doc__\n if errmsg:\n print >> oss.stderr, \"\"\"\nError:\n\"\"\" + str(errmsg)\n oss.exit(rc)", "def usage(fd):\n fd.write('Usage:\\n')\n exe = os.path.basename(sys.argv[0])\n fd.write(\" %s <formula file> <sha> <os/arch> <root url>\\n\" % exe)\n fd.write('\\n')\n fd.write('For example:\\n')\n fd.write(\n \" %s /some/dir/gcc.rb 123... 
tiger_g3 https://some.server/bottles\\n\" \\\n % exe\n )", "def usage():\n\tprint \"Usage: \" + sys.argv[0] + \" --include\"", "def help():\n print(UI.HELP)", "def usage():\n usage_string = \"oxclient.py\\n\" + \\\n \"\\t[-n, --number] -- NNNN [0-9999] (optional: default is \" + str(CONFIG['number']) + \")\\n\" + \\\n \"\\t[-H, --host] -- Something like freedom.dynalis.org(optional: default is \" + str(CONFIG['host']) + \")\\n\" + \\\n \"\\t[-p, --port] -- Something like 50007(optional: default is \" + str(CONFIG['port']) + \")\\n\" + \\\n \"\\t[-l, --logfile] -- log file(optional: default is \" + str(CONFIG['logfile']) + \")\\n\" + \\\n \"\\t[-s, --stdout] -- stdout on.\\n\" + \\\n \"\\t[-d, --debug] -- debug on.\\n\" + \\\n \"\\t[-h, --help] -- show usage.\\n\\n\" + \\\n \"oxclient.py --number 9999 --host freedom.dynalis.org --port 50007 --logfile /tmp/oxclient.log --stdout\\n\"\n print(usage_string)", "def usage():\n print \"\"\"\nUsages:\n ./lmf-merger.py -m file1 file2 outfile\n ./lmf-merger.py -s file1 ...\n example: ./lmf-merger.py data/lmf/ac_eko_dlouhe_50_xxx_lmf.xml \\\n data/lmf/ac_frs_dlouhe_50_xxx_lmf.xml ac_eko-frs_dlouhe_50_xxx_lmf.xml\n \"\"\"", "def help():\n \n pass", "def print_usage(exitcode, reason=None):\n if reason is not None:\n print('Error: %s' % reason)\n print('Usage: %s <broker> [opts] [<topic>] [<schema_registry>]' % sys.argv[0])\n print('Options:')\n print(' --consumer, --producer, --avro, --performance - limit to matching tests')\n\n sys.exit(exitcode)", "def usage():\n print(\"Usage:\")\n print(\"songtiment.py [-s \\\"song\\\"] -a \\\"artist\\\"\")", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def print_usage():\n leader = \" \"\n print(\"\\n Usage: scanning [-v|-c|-k=N] SOURCE PAPER SCALE COLOR [basename]\\n\")\n print(\" SOURCE Paper source:\")\n print_option_set(scan_core.SOURCES, leader)\n print(\" PAPER Paper size:\")\n print_option_set(scan_core.PAPERS, leader)\n print(\" SCALE Scaling factor:\")\n print_option_set(scan_core.SCALES, leader)\n print(\" COLOR Colour mode:\")\n print_option_set(scan_core.COLORS, leader)\n print(\" basename Desired base filename, optionally including path\")\n print(\" -v View each scan when conversion is complete\")\n print(\" -c Confirm each scan before saving in final location\")\n print(\" -d Print the scanning a conversion commands used for debugging\")\n print(\" -k=N Do not convert page N of scan\\n\")\n print(\"SCANNING Script (c)2010 Jody Sankey\")\n version = sys.version_info\n print(\"Currently running in Python v{}.{}.{}\\n\".format(*version))\n sys.exit()", "def usage():\n print(\"\\n\" + \n \"The following are arguments required:\\n\" + \n \"-i: the density threshold.\\n\" +\n \"-o: the output file.\\n\" +\n \"-v: the bridge cut version (\" + \", \".join(BridgeCut.VERSIONS) + \").\\n\" + \n \"-t: the density threshold.\\n\" + \n \"\\n\" + \n \"Example Usage:\\n\" + \n \"python main.py -i \\\"../data/toy/toy-bowtie.txt\\\" -o \\\"../results/toy/toy-bowtie.txt\\\" -d 0 -v \\\"edge-c\\\" -t .5\" +\n \"\\n\")", "def print_usage():\n\n print \"Usage:\"\n print \" %s sign <priv_key> <cert> <file1 ...>\" % sys.argv[0]\n print \" %s verify <trust_dir> <file1 
...>\" % sys.argv[0]\n sys.exit(1)", "def printUsage():\n print version\n print \"usage: $PROGPATH/python/padutils.py padPath dateStart dateStop sensor abbr='spg' whichAx='s' pm='+' tag='untitled' Nfft=None No=None\"", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def usage(code, msg=''):\n if msg:\n print >> sys.stderr, msg\n print >> sys.stderr\n print >> sys.stderr, __doc__ % globals()\n sys.exit(code)", "def print_usage():\n print(\"Usage:\")\n print(\"\\treplace_placeholders -c configFile [-i inFile] [-o outFile] [-h]\")", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def test_explicit_usage_message(self):\n assert 'Usage:' in main('coloredlogs', '--help', capture=True)", "def printUsage():\n print\n print \"Usage: python FillRegression.py args\"\n print\n print \"where args are property pairs as follows....\"\n print \"IndependentBaseName=XXXX - path to the base filename of independent time series (no .csv or _01.csv at end)\"\n print \"DependentBaseName=XXXX - path to the base filename of dependent time series (no .csv or _01.csv at end)\"\n print \"IndepedentTSID=XXXX - the independent TSID to use in the IndependentTSID column of the statistics table\"\n print \"DepedentTSID=XXXX - the dependent TSID to use in the DependentTSID column of the statistics table\"\n print \"StatisticsFile=XXXX - path to the output statistics file\"\n print \"NumberOfEquations=XXXX - number of equations (1 or 12)\"\n return", "def usage():\n print(\"Usage:\\n python NFLplayerSentiments.py 'QUERY' (remember to include the quotes).\")", "def print_usage_info_screen():\n print \"\"\n print \"Usage: \"\n print \" ./generateButtons.py 'background-color' 'foreground-color'\"\n print \"\"\n print \"Examples: \"\n print \" ./generateButtons.py '#123456' '#ededed'\"\n print \" ./generateButtons.py 'red' 'white'\"\n print \" ./generateButtons.py '#123' 'purple'\"\n print \" ./generateButtons.py '00e' 'fff'\"\n print \" ./generateButtons.py 'ed' 'ff'\"\n print \"\"", "def info():\n print __doc__\n sys.exit(1)", "def print_help():\n\tprint(\"Help text\")", "def help_me():\n print(\"i'm trapped\")", "def print_help():\n print(bcolors.OKBLUE, \" \", \"=\"*80, bcolors.ENDC, sep=\"\")\n print(\"\"\" HELP\n \n No arg: Enter formula and get assembly printed on the screen\n 1 arg : Enter file and get file.asm (excluding the keyword \"help\")\n >2 arg: This screen shows up\n\"\"\")\n print(bcolors.OKBLUE, \" \", \"=\"*80, bcolors.ENDC, sep=\"\", end=\"\\n\\n\")", "def print_usage(arg_processor):\n min_width = max(map(lambda x : len(x),\\\n arg_processor.program_arg_order + arg_processor.program_flag_order))\n print \"Usage: python {} \".format(sys.argv[0])\n print \"The following flags and arguments can be supplied:\"\n print \"Flags:\"\n for flag in arg_processor.program_flag_order:\n print \" {:<{}} : {}\".format(flag, min_width,\n arg_processor.program_flags[flag].description)\n print \"Arguments:\"\n for arg in arg_processor.program_arg_order:\n if arg_processor.program_args[arg].validator != None:\n advice_str = arg_processor.advice_functions[\\\n arg_processor.program_args[arg].validator](\\\n 
arg_processor.program_args[arg].validator_args)\n else:\n advice_str = \"\"\n if arg_processor.program_args[arg].needed:\n print \" {:<{}} : {}{}\".format(arg, min_width,\n arg_processor.program_args[arg].description, advice_str)\n else:\n print \" {:<{}} : {}{} [optional, default: {}]\".format(arg,\n min_width, arg_processor.program_args[arg].description,\n advice_str, arg_processor.program_args[arg].default_value)\n sys.exit(0)", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def test_cli_help(self):\n output = self.update_command('-h')", "def print_help():\n print('\\nExtract UMI from Linnarrson sam filename, add as a tag, write bam.\\n'\n 'Usage: python3 postprocess_umi_tag.py <linnarrson_file.sam> '\n '<linnarrson_file.bam>\\n')", "def printHelp():\n print(\"League Replay Analyzer v1.0\")\n print(\"Usage: main.py <gameID> [OPTIONS <optional_arguments>]\")\n print(\"Options:\")\n print(\"-t | -T | --tilt tilt-related stats (mute stats, surrender votes, AFKs etc)\")\n print(\"-i | -I | --items item related stats (build, wards, consumables etc)\")\n print(\"-r | -R | --runes rune related stats (build, stats per rune)\")\n print(\"-s | -S | --spells | --spell spell info (times cast per ability, times cast per summ)\")\n print(\"-o | -O | --obj | --objectives objective related stats (obejctives stolen, stolen assists, damage etc)\")\n print(\"-l | -L | --lag lag related info (ping, time spent DC'ed, time from last DC, AFK)\")\n print(\"--latest automatically detect and analyze the last replay that was downloaded\")\n print(\"--custom-path <path> analyze the file at the custom <path>. argument required\")\n print(\"--server <server_name> specify server (default: EUN1)\")\n print(\"-a | -A | --all | --dump print all stats\")\n print(\"-h | -H | --help display this help\\n\")", "def help(self):\n\t\tself.usage()\n\t\tprint \"\\tscreen - XML screen file\"\n\t\tprint \"\\troll - roll name\"\n\t\tsys.exit(0)", "def usage():\n print('Usage: {} node0|node1 URL'.format(sys.argv[0]))\n sys.exit(1)", "def helpMe():\n print('')\n os.system('python2 ' + program + ' -h')\n print('')", "def _usage(args, contents): # pragma: no cover\n sys.exit(0)", "def print_help():\n parser = parsersetup()\n parser.print_help()", "def test_cli_help(run):\n\n out, err, mocked_input = run(dork.cli.main, \"-h\")\n assert \"usage:\" in out\n assert err == \"\"\n assert mocked_input.call_count == 0", "def usage():\r\n print(\"Usage:\", end=' ')\r\n print(\"lab4.py <search directory> <regular expression filter>\")", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write 
output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def help():\n print \"Help comes to those who ask\"", "def usage(where=sys.stdout):\n print('Gather Holding IDs via REST using a list of MMS IDs.',\n file=where) \n print('Usage:', file=where)\n print(' west2_gather_Holding_IDs.py <file.txt> <APIKEY>', file=where)\n print('Where:', file=where)\n print(' file.txt List of MSS IDs (one / line)',\n file=where)\n print(' APIKEY API key for accessing Alma REST APIs',\n file=where)\n print('Output:', file=where)\n print(' Generates a datestamped text file: holding-and-mss-ids.<date>.txt',\n file=where)\n print(' consisting of lines holding_id<tab>mms_id', file=where)", "def usage():\n sys.stderr.write(\"\"\"\nUSAGE: %s [options] [file]\nVersion: %s\n\nIf \"-\" is specified as file the data is read from the stdinput.\nA file ending with \".txt\" is considered to be in TIText format,\n'.a43' and '.hex' as IntelHex and all other filenames are\nconsidered as ELF files.\n\nGeneral options:\n -h, --help Show this help screen.\n -c, --comport=port Specify the communication port to be used.\n (Default is 0)\n 0->COM1 / ttyS0\n 1->COM2 / ttyS1\n etc.\n -P, --password=file Specify a file with the interrupt vectors that\n are used as password. This can be any file that\n has previously been used to program the device.\n (e.g. -P INT_VECT.TXT).\n -f, --framesize=num Max. number of data bytes within one transmitted\n frame (16 to 240 in steps of 16) (e.g. -f 240).\n -m, --erasecycles=num Number of mass erase cycles (default is 1). Some\n old F149 devices need additional erase cycles.\n On newer devices it is no longer needed. (e.g. for\n an old F149: -m20)\n -U, --unpatched Do not download the BSL patch, even when it is\n needed. This is used when a program is downloaded\n into RAM and executed from there (and where flash\n programming is not needed.)\n -D, --debug Increase level of debug messages. This won't be\n very useful for the average user...\n -I, --intelhex Force fileformat to IntelHex\n -T, --titext Force fileformat to be TIText\n -N, --notimeout Don't use timeout on serial port (use with care)\n -B, --bsl=bsl.txt Load and use new BSL from the TI Text file\n -S, --speed=baud Reconfigure speed, only possible with newer\n MSP403-BSL versions (>1.5, read slaa089a.pdf for\n details). If the --bsl option is not used, an\n internal BSL replacement will be loaded.\n Needs a target with at least 2kB RAM!\n Possible values are 9600, 19200, 38400\n (default 9600)\n -1, --f1x Specify CPU family, in case autodetect fails\n -2, --f2x Specify CPU family, in case autodetect fails\n -4, --f4x Specify CPU family, in case autodetect fails\n --F1x and --f2x are only needed when the \"change\n baudrate\" feature is used and the autodetect feature\n fails. 
If the device ID that is uploaded is known, it\n has precedence to the command line option.\n --invert-reset Invert signal on RST pin (used for some BSL hardware)\n --invert-test Invert signal on TEST/TCK pin (used for some BSL\n hardware)\n --swap-reset-test Swap the RST and TEST pins (used for some BSL hardware)\n --telos-latch Special twiddle in BSL reset for Telos hardware\n --telos-i2c DTR/RTS map via an I2C switch to TCK/RST in Telos Rev.B\n --telos Implies options --invert-reset, --invert-test,\n --swap-reset-test, and --telos-latch\n --telosb Implies options --swap-reset-test, --telos-i2c,\n --no-BSL-download, and --speed=38400\n --goodfet10\n --goodfet20\n --goodfet30\n --tmote Identical operation to --telosb\n --z1 Bootstrap a Z1\n --no-BSL-download Do not download replacement BSL (disable automatic)\n --force-BSL-download Download replacement BSL even if not needed (the one\n in the device would have the required features)\n --slow Add delays when operating the control pins. Useful if\n the pins/circuit has high capacitance.\n\nProgram Flow Specifiers:\n -e, --masserase Mass Erase (clear all flash memory)\n -E, --erasecheck Erase Check by file\n -p, --program Program file\n -v, --verify Verify by file\n\nThe order of the above options matters! The table is ordered by normal\nexecution order. For the options \"Epv\" a file must be specified.\nProgram flow specifiers default to \"pvr\" if a file is given.\nDon't forget to specify \"e\" or \"eE\" when programming flash!\n\nData retrieving:\n -u, --upload=addr Upload a datablock (see also: -s).\n -s, --size=num Size of the data block do upload. (Default is 2)\n -x, --hex Show a hexadecimal display of the uploaded data.\n (Default)\n -b, --bin Get binary uploaded data. This can be used\n to redirect the output into a file.\n\nDo before exit:\n -g, --go=address Start program execution at specified address.\n This implies option --wait.\n -r, --reset Reset connected MSP430. Starts application.\n This is a normal device reset and will start\n the program that is specified in the reset\n vector. (see also -g)\n -w, --wait Wait for <ENTER> before closing serial port.\n\nIf it says \"NAK received\" it's probably because you specified no or a\nwrong password.\n\"\"\" % (sys.argv[0], VERSION))", "def help(self):\n res = \"\"", "def test_usage(clickrunner):\n result = clickrunner.invoke(maincli.entrypoint)\n assert result.exit_code == 0\n assert \"Usage\" in result.output\n for valid_subcmd in maincli.valid_subcmds:\n assert valid_subcmd in result.output\n for invalid_subcmd in maincli.invalid_subcmds:\n assert invalid_subcmd not in result.output", "def print_help(self):\n print self.get_help()", "def usage():\r\n print 'Usage: collect_logs.py testrun \"env1, env2,...\" \"oscounters, applogs, gclogs, traces\"'\r\n print \"testrun = Name of the test execution e.g. 12032713\"\r\n print \"environments = Name of the environment defined in the environments.ini file\"\r\n print \"logtypes = List types of logfiles to collect\"" ]
[ "0.810532", "0.810532", "0.80087817", "0.7974354", "0.7823985", "0.7744431", "0.7735299", "0.760537", "0.76011324", "0.76011324", "0.7589592", "0.7553557", "0.7550795", "0.75118643", "0.7472906", "0.746817", "0.7346284", "0.7329634", "0.7301601", "0.7280952", "0.7278727", "0.7260191", "0.72328395", "0.72024983", "0.7202459", "0.7194138", "0.7190348", "0.7183997", "0.7168461", "0.7166564", "0.71477795", "0.7147055", "0.710746", "0.7099631", "0.7089686", "0.70865035", "0.707984", "0.70783854", "0.7077015", "0.70708734", "0.70622253", "0.705376", "0.705376", "0.70417804", "0.70415634", "0.7037306", "0.703558", "0.70335865", "0.70244294", "0.7013945", "0.70040816", "0.7001593", "0.70002955", "0.6994818", "0.699411", "0.69566846", "0.6946753", "0.6944868", "0.6935697", "0.6899564", "0.6896624", "0.68924826", "0.68895125", "0.68543535", "0.6849991", "0.6842363", "0.68355376", "0.6832953", "0.6832556", "0.68209743", "0.6811869", "0.68107444", "0.6806402", "0.67924994", "0.6788281", "0.67817056", "0.67671084", "0.6756762", "0.6753851", "0.6747824", "0.6737409", "0.67372936", "0.67369926", "0.67338485", "0.67197824", "0.6717039", "0.670479", "0.66998947", "0.6699858", "0.66951513", "0.6694309", "0.6669279", "0.66687006", "0.6666491", "0.66549087", "0.6650367", "0.6647119", "0.66357017", "0.663519", "0.6629995" ]
0.8339372
0
Run a module as a script. Locates the module's file and runs it in the current interpreter, or optionally a debugger.
def RunScriptModule(module): args = sys.argv[1:] debug_binary = False debugger = 'gdb --args' debug_script = False show_command_and_exit = False while args: if args[0] == '--helpstub': PrintOurUsage() sys.exit(0) if args[0] == '--debug_binary': debug_binary = True args = args[1:] continue if args[0] == '--debug_script': debug_script = True args = args[1:] continue if args[0] == '--show_command_and_exit': show_command_and_exit = True args = args[1:] continue matchobj = re.match('--debugger=(.+)', args[0]) if matchobj is not None: debugger = StripQuotes(matchobj.group(1)) args = args[1:] continue break # Now look for my main python source file # TODO(dborowitz): This will fail if the module was zipimported, which means # no egg depending on this script runner can be zip_safe. main_filename = module.__file__ assert os.path.exists(main_filename), ('Cannot exec() %r: file not found.' % main_filename) assert os.access(main_filename, os.R_OK), ('Cannot exec() %r: file not' ' readable.' % main_filename) args = [main_filename] + args if debug_binary: debugger_args = debugger.split() program = debugger_args[0] # If pathname is not absolute, determine full path using PATH if not os.path.isabs(program): program = FindEnv(program) python_path = sys.executable command_vec = [python_path] if debug_script: command_vec.extend(GetPdbArgs(python_path)) args = [program] + debugger_args[1:] + command_vec + args elif debug_script: args = [sys.executable] + GetPdbArgs(program) + args else: program = sys.executable args = [sys.executable] + args if show_command_and_exit: print 'program: "%s"' % program print 'args:', args sys.exit(0) try: sys.stdout.flush() os.execv(program, args) except EnvironmentError as e: if not getattr(e, 'filename', None): e.filename = program # Add info to error message raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_python_script(package=None, module=None, args=[], p_args=[]):\n assert module is not None\n assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))\n path = python_script_exists(package, module)\n run_program(sys.executable, p_args + [path] + args)", "def run_script(extension_invocation_info):\n acm.RunModuleWithParameters(__name__, acm.GetDefaultContext())", "def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", self.filename)\n print(\"Expected extension:\", config.FILE_EXT)\n raise SystemExit\n\n name = module.__name__\n if module.__name__ == config.MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n config.MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n transforms.identify_requested_transformers(source)\n\n if config.TRANSFORMERS:\n original = source\n source = transforms.add_all_imports(source)\n source = transforms.apply_source_transformations(source)\n\n if config.DIFF and original != source:\n self.write_html_diff(name, original, source)\n\n if config.CONVERT and self.filename.endswith(config.FILE_EXT):\n print(\"############### Original source: ############\\n\")\n print(original)\n print(\"\\n############### Converted source: ############\\n\")\n print(source)\n print(\"=\" * 50, \"\\n\")\n\n source = transforms.apply_ast_transformations(source)\n exec(source, vars(module))", "def module_runner(module):\n task_queue.put(1)\n result = sys.modules[module].run()\n task_queue.get()\n store_module_result(result) # Store the result in our repo", "def exec_module(self, module):\n pass", "def start(data_file, script_file=None):\n\n # Try to start up the interpreter\n try:\n initialize(data_file)\n except blotish.BlotishError, err:\n blot_common.print_blot_error(err)\n return\n\n # Maybe run a script\n exit_flag = False\n if script_file:\n exit_flag = execute_file(script_file)\n\n # Start the interpreter unless the script called exit\n if not exit_flag:\n global interpreter\n interpreter.cmdloop()\n\n # Cleanup\n finalize()", "def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))", "def _run_script(fullname):\n name = posixpath.basename(fullname)\n if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the modulate in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0", "def load_script_as_module(script_name):\n spec = create_script_spec(script_name)\n script = module_from_spec(spec)\n spec.loader.exec_module(script)\n\n return script", "def execute_module(self, module, *args, **opts):\n module_file = module.__file__\n if module_file.endswith('.pyc'):\n module_file = module_file[:-1]\n cmd = [self._path]\n if 'python_options' in opts:\n cmd.extend(opts['python_options'])\n del opts['python_options']\n cmd.append(module_file)\n cmd.extend(args)\n return get_cmd_output(*cmd, 
**opts)", "def run_file(filename, logfile=None, execdir=None):\n if not runpy_available: #pragma:nocover\n raise pyutilib.common.ConfigurationError(\"Cannot apply the run_file() function because runpy is not available\") \n #\n # Open logfile\n #\n if not logfile is None:\n sys.stderr.flush()\n sys.stdout.flush()\n save_stdout = sys.stdout\n save_stderr = sys.stderr\n OUTPUT=open(logfile,\"w\")\n sys.stdout=OUTPUT\n sys.stderr=OUTPUT\n #\n # Add the file directory to the system path\n #\n if '/' in filename:\n tmp= \"/\".join((filename).split(\"/\")[:-1])\n tmp_import = (filename).split(\"/\")[-1]\n sys.path.append(tmp)\n elif '\\\\' in filename:\n tmp = \"\\\\\".join((filename).split(\"\\\\\")[:-1])\n tmp_import = (filename).split(\"\\\\\")[-1]\n sys.path.append(tmp)\n else:\n tmp_import = filename\n name = \".\".join((tmp_import).split(\".\")[:-1])\n #\n # Run the module\n #\n try:\n if not execdir is None:\n tmp=os.getcwd()\n os.chdir(execdir)\n tmp_path = sys.path\n sys.path = [execdir] + sys.path\n runpy.run_module(name,None,\"__main__\")\n if not execdir is None:\n os.chdir(tmp)\n sys.path = tmp_path\n except Exception: #pragma:nocover\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr\n raise\n #\n # Close logfile\n #\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr", "def _run_script(fullname):\n name = posixpath.basename(fullname)\n if name[-3:] == '.py':\n name = name[:-3] # strip .py extension\n\n modname = [string.join(fullname.split('/')[0:-1],'/')]\n trylist = ((name, None), (name+'.py', None),\n (name, modname), (name+'.py', modname))\n\n # look for the module in standard locations, load it if you\n # find it, otherwise return 1\n for fname, path in trylist:\n try:\n if path:\n fp, pathname, description = imp.find_module(fname, path)\n else:\n fp, pathname, description = imp.find_module(fname)\n except ImportError:\n fp = None\n if fp:\n sys.argv[0] = pathname\n try:\n mod = imp.load_module('__main__', fp, pathname, description)\n finally:\n fp.close()\n return 1\n return 0", "def do_run(self, line: str):\n if self._real_module is None:\n print(\"'run' command depends on using a module. See 'use' for help.\")\n return\n\n self._real_module.run()", "def modExec(module):\n modName = module.split('_')[-1]\n if \"live\" in module:\n dn = '{0} (live)'.format(modName.upper())\n else:\n dn = '{0}'.format(modName.upper())\n\n try:\n modStart = datetime.utcnow()\n log.info(\"Running {0}\".format(dn))\n modImport = 'modules.' + module\n\n import_module(modImport)\n\n modOutput = [i for i in glob.glob(outputdir + '/*') if all(p in i for p in [modName, runID])]\n try:\n arch = [archive.add_file(os.path.basename(outfile)) for outfile in modOutput]\n except IndexError:\n pass\n\n modEnd = datetime.utcnow()\n modRuntime = modEnd - modStart\n log.debug(\"{0} finished in {1}.\".format(dn, modRuntime))\n\n except KeyboardInterrupt:\n sys.stdout.write('\\r')\n sys.stdout.flush()\n log.error(\"{0} was killed. 
\".format(module))\n\n except Exception:\n log.error(\"{0} failed: {1}\".format(module, [traceback.format_exc()]))", "def run_script(script_file: str, config_file: str, **kwargs: Any) -> None:\n # Add config path and current working directory to sys.path to correctly load the configuration\n script_filepath = Path(script_file)\n config_filepath = Path(config_file)\n sys.path.insert(0, script_filepath.resolve().parent.as_posix())\n sys.path.insert(0, config_filepath.resolve().parent.as_posix())\n sys.path.insert(0, os.getcwd())\n\n module = load_module(script_filepath)\n _check_script(module)\n\n run_fn = module.__dict__[\"run\"]\n\n # Lazy setup configuration\n config = ConfigObject(config_filepath, script_filepath=script_filepath)\n\n run_fn(config, **kwargs)", "def runScriptAtPath(path):\n \n sys.argv = [path]\n for arg in PytoClasses.Python.shared.args:\n sys.argv.append(str(arg))\n \n def run() -> None:\n os.system = PytoClasses.Python.shared.system\n directory = os.path.expanduser(os.path.dirname(path))\n sys.path.insert(0, directory)\n try:\n global __script__\n spec = importlib.util.spec_from_file_location(\"__main__\", path)\n __script__ = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(__script__)\n PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith(\"__\")]\n except SystemExit:\n print(\"SystemExit\")\n except Exception as e:\n \n exc_type, exc_obj, exc_tb = sys.exc_info()\n \n extracts = traceback.extract_tb(sys.exc_info()[2])\n count = len(extracts)\n \n lineNumber = -1\n \n fileName = path\n for i, extract in enumerate(extracts):\n if extract[0] == fileName:\n lineNumber = extract[1]\n break\n count -= 1\n \n if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number\n lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1]\n \n PytoClasses.Python.shared.errorType = exc_type.__name__\n PytoClasses.Python.shared.errorReason = str(e)\n PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber)\n \n print(traceback.format_exc(limit=-count))\n \n sys.path.remove(directory)\n\n PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1\n PytoClasses.ReviewHelper.shared.requestReview()\n PytoClasses.Python.shared.isScriptRunning = False\n \n thread = threading.Thread(target=run, args=())\n \n def loop():\n while PytoClasses.Python.shared.isScriptRunning:\n time.sleep(1)\n ignoredThreads.append(thread)\n raise Exception(\"Stopped script!\")\n \n def runLoop():\n try:\n loop()\n except:\n pass\n\n\n thread.start()\n\n runLoop()\n return __script__", "def run_module(self, path):\n\n module = self.import_module(path)\n result = None\n\n if module:\n try:\n result = module.run()\n except AttributeError:\n self.error('Error Running Module: Missing run() method.')\n except Exception:\n e = sys.exc_info()[1]\n traceback = sys.exc_info()[2]\n self.warning('Exeption caught in module: {0} line: {1}'.format(\n e,\n traceback.tb_lineno))\n self.calls.append({path: result})\n state.save_hook_call(path, result)\n return result", "def run_execute_file(file_path, globals=None, locals=None):\n if globals is None:\n globals = {}\n globals.update({\n \"__file__\": file_path,\n \"__name__\": \"__main__\",\n })\n with open(file_path, 'rb') as file:\n exec(compile(file.read(), file_path, 'exec'), globals, locals)", "def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to 
execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)", "def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n print script_path\n execfile(script_path, globals_)", "def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()", "def load_script(filename):\n path, module_name, ext = _extract_script_components(filename)\n add_search_path(path)\n return importlib.import_module(module_name)\n # return _load_module(module_name)", "def run_zxpy(filename: str, module: ast.Module) -> None:\n patch_shell_commands(module)\n exec(compile(module, filename, mode='exec'))", "def run_example_debug_cmd(example_module_name, example_argv):\n return run_example_debug(example_module_name, example_argv)", "def do_pyscript(self, arg, opts=None):\n if not arg:\n self.perror(\"pyscript command requires at least 1 argument ...\", traceback_war=False)\n self.do_help('pyscript')\n return\n\n if not USE_ARG_LIST:\n arg = shlex.split(arg, posix=POSIX_SHLEX)\n\n # Get the absolute path of the script\n script_path = os.path.expanduser(arg[0])\n\n # Save current command line arguments\n orig_args = sys.argv\n\n # Overwrite sys.argv to allow the script to take command line arguments\n sys.argv = [script_path]\n sys.argv.extend(arg[1:])\n\n # Run the script - use repr formatting to escape things which need to be escaped to prevent issues on Windows\n self.do_py(\"run({!r})\".format(script_path))\n\n # Restore command line arguments to original state\n sys.argv = orig_args", "def main(args):\n module = args.module\n\n if args.step not in STEP_OPTIONS:\n raise ValueError(\n f\"{args.step} is an unknown option. Your options are {STEP_OPTIONS}.\"\n )\n\n if module == \"structure_plan\":\n run_module_structure_plan(args)\n elif module == \"floor_plan\":\n run_module_floor_plan(args)\n elif module == \"complete_floorplan\":\n run_module_complete_floorplan(args)\n elif module == \"ground_plan\":\n run_module_ground_plan(args)\n elif module == \"text_to_gdf\":\n run_module_text_to_gdf(args)\n else:\n raise ValueError(\n f\"{module} is an unknown option. 
Your options are {MODULE_OPTIONS}.\"\n )", "def run():\n print('')\n\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir('tools/' + CHANGEME_GITNAME)\n if sop.debug.lower() == 'y':\n comm.runCommand('python2 ' + program + ' -s ' + sop.ip + ' -d', 'ChangeMe')\n else:\n comm.runCommand('python2 ' + CHANGEME_GITRUN + ' -s ' + sop.ip, 'ChangeMe')\n os.chdir(dname)", "def execute_module(self):\n raise NotImplementedError", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def exec_module(cls, *args, **kwargs): # real signature unknown\n pass", "def call_script(self, script):\n filename, callable = script.rsplit(':', 1)\n filename = os.path.abspath(filename)\n module = imp.load_source('script', filename)\n script = getattr(module, callable.strip())\n\n try:\n script(self.options, self.buildout, self.augmented_environment())\n except TypeError:\n # BBB: Support hook scripts that do not take the environment as\n # the third parameter\n script(self.options, self.buildout)", "def setup_module(module):\n print(\"Start rishabhSetupModule of Program\")", "def __launch_python_module(path, cmd, args):\n\n mod_class = None\n mod_inst = None\n\n # We should always be in TOP\n if prop.TOP is not None:\n os.chdir(prop.TOP)\n\n # Next, get the path setup.\n if __update_path() != 0:\n log.e(TAG, \"Unable to update library path!\")\n return -7\n\n # If we got here, we try to load as a python module.\n module = imp.load_source(cmd, path)\n\n if module is None:\n log.e(TAG, \"Error launching module '%s'.\" % cmd)\n return -5\n\n try:\n mod_class = getattr(module, cmd)\n mod_inst = mod_class()\n\n except AttributeError:\n log.e(TAG, \"Unable to find class '%s' in module!\" % cmd)\n return -6\n\n return mod_inst.run(args)", "def run_script(input_file, run_dir, script_name, interpreter='python'):\n from paver.runtime import sh\n from paver.path import path\n docdir = path(input_file).dirname()\n output_text = sh('cd %(docdir)s/%(run_dir)s;%(interpreter)s %(script_name)s 2>&1' % vars(),\n capture=True)\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\n response += '\\n\\t'.join(output_text.splitlines())\n while not response.endswith('\\n\\n'):\n response += '\\n'\n return response", "def run_script(self, pathname, caller=None):\n self.msg(2, \"run_script\", pathname)\n\n pathname = os.path.realpath(pathname)\n m = self.findNode(pathname)\n if m is not None:\n return m\n\n if sys.version_info[0] != 2:\n with open(pathname, 'rb') as fp:\n encoding = util.guess_encoding(fp)\n\n with open(pathname, _READ_MODE, encoding=encoding) as fp:\n contents = fp.read() + '\\n'\n if contents.startswith(BOM):\n # Ignore BOM at start of input\n contents = contents[1:]\n\n else:\n with open(pathname, _READ_MODE) as fp:\n contents = fp.read() + '\\n'\n\n co_ast = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)\n co = compile(co_ast, pathname, 'exec', 0, True)\n m = self.createNode(Script, pathname)\n self._updateReference(caller, m, None)\n self._scan_code(m, co, co_ast)\n m.code = co\n if self.replace_paths:\n m.code = self._replace_paths_in_code(m.code)\n return m", "def magic_run(self, parameter_s =''):\n\n # get arguments and set sys.argv for program to be run.\n opts,arg_lst = self.parse_options(parameter_s,'nipd:l:rs:t:',\n mode='list',list_all=1)\n\n try:\n filename = get_py_filename(arg_lst[0])\n except IndexError:\n warn('you must provide at least a filename.')\n print '\\n@run:\\n',inspect.getdoc(self.magic_run)\n return\n except 
IOError,msg:\n warn(msg)\n return\n\n save_argv = sys.argv # save it for later restoring\n # perform shell-like expansions on the argument list before passing it\n # to programs\n xvars = os.path.expandvars\n xuser = os.path.expanduser\n xpand = lambda s: xvars(xuser(s))\n sys.argv = [xpand(arg) for arg in arg_lst] \n\n if opts.has_key('i'):\n prog_ns = self.shell.user_ns\n else:\n name = opts.has_key('n') and __name__ or '__main__'\n prog_ns = {'__name__':name}\n\n stats = None\n try:\n if opts.has_key('p'):\n cmd = parameter_s.split()[:-1]\n stats = self.magic_prun('',0,opts,arg_lst,prog_ns)\n else:\n self.shell.safe_execfile(filename,prog_ns,prog_ns)\n if not opts.has_key('i'):\n # update IPython interactive namespace\n self.user_ns.update(prog_ns)\n finally:\n sys.argv = save_argv\n return stats", "def exec_file(filename, globals=None, locals=None):\n if globals is None:\n globals = {}\n if locals is None:\n locals = globals\n locals['__file__'] = filename\n from py import path\n from _pytest import config\n from _pytest.assertion import rewrite\n f = path.local(filename)\n config = config._prepareconfig([], [])\n source_stat, code = rewrite._rewrite_test(config, f)\n logger.debug('filename: {} source_stat: {} code: {}'.format(filename, source_stat, code))\n exec(code, globals, locals)", "def run_script(self, filename=None, silent=False, set_focus=False):\r\n if filename is None:\r\n self.shell.restore_stds()\r\n filename = QFileDialog.getOpenFileName(self,\r\n self.tr(\"Run Python script\"), os.getcwdu(),\r\n self.tr(\"Python scripts\")+\" (*.py ; *.pyw)\")\r\n self.shell.redirect_stds()\r\n if filename:\r\n filename = unicode(filename)\r\n os.chdir( os.path.dirname(filename) )\r\n filename = os.path.basename(filename)\r\n self.emit(SIGNAL(\"refresh()\"))\r\n else:\r\n return\r\n command = \"execfile(%s)\" % repr(osp.abspath(filename))\r\n if set_focus:\r\n self.shell.setFocus()\r\n if self.dockwidget and not self.ismaximized:\r\n self.dockwidget.setVisible(True)\r\n self.dockwidget.raise_()\r\n if silent:\r\n self.shell.write(command+'\\n')\r\n self.shell.run_command(command)\r\n else:\r\n self.shell.write(command)", "def run_as_script(scenario_path=None):\n import cea.globalvar\n gv = cea.globalvar.GlobalVariables()\n\n if scenario_path is None:\n scenario_path = gv.scenario_reference\n\n locator = cea.inputlocator.InputLocator(scenario_path=scenario_path)\n weather_file = locator.get_default_weather()\n moo_optimization(locator=locator, weather_file= weather_file, gv=gv)\n\n print 'test_optimization_main() succeeded'", "def run_main():\n main(sys.argv)", "def _run_file(file_path, globals_):\n script_name = os.path.basename(file_path)\n\n sys.path = (_PATHS.script_paths(script_name) +\n _PATHS.scrub_path(script_name, sys.path))\n\n fix_google_path()\n\n execfile(_PATHS.script_file(script_name), globals_)", "def script_run(ctx: click.Context, name, script_arguments):\n subcommand_script.cmd_run(ctx.obj, name, script_arguments)", "def launch_local_module(root, cmd, args):\n\n module_path = \"%s/local_modules/%s\" % (root, cmd)\n\n # If we are dealing with a bash script, just run and exit.\n if pm.is_bash_module(module_path):\n log.d(TAG, \"This is a bash module!\")\n\n return __launch_bash_module(module_path, args)\n\n return __launch_python_module(module_path, cmd, args)", "def test_simple_as_file_in_module():\n # Spawn the simple testbed app using `app.py`\n cwd = Path(__file__).parent / \"testbed\" / \"simple\"\n output = run_app([\"app.py\"], cwd=cwd)\n assert_paths(output, 
app_path=Path(toga.__file__).parent, app_name=\"simple-app\")", "def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)", "def run_module(self, module_name, args=[], kwargs={}):\n if not module_loader.has_plugin(module_name):\n raise UnsupportedAnsibleModule(\"Unsupported ansible module \\\"{}\\\"\".format(module_name))\n self.module_name = module_name\n\n previous_frame = inspect.currentframe().f_back\n caller_info = inspect.getframeinfo(previous_frame)\n kwargs.update({\"caller_info\": caller_info})\n\n return self._run_ansible_module(*args, **kwargs)", "def test_simple_as_module():\n # Spawn the simple testbed app using `-m app`\n cwd = Path(__file__).parent / \"testbed\" / \"simple\"\n output = run_app([\"-m\", \"app\"], cwd=cwd)\n assert_paths(output, app_path=Path(toga.__file__).parent, app_name=\"simple-app\")", "def main():\n\n global config_params\n global module_prefix\n global debug\n\n #######################################################################\n #\n # OPTIONS\n #\n #######################################################################\n\n # Swallow the options\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"d\", [])\n except getopt.GetoptError, e:\n print >> sys.stderr, 'Incorrect option.'\n print >> sys.stderr, main.__doc__\n sys.exit(2)\n\n # Parse the options\n for optstr, value in opts:\n # Debug option\n if optstr == \"-d\":\n config_params['debug'] = '1'\n debug = 1\n\n # Check that there are additional arguments\n if len(args) == 0:\n print >> sys.stderr, 'Script needs additional parameters'\n sys.exit(1)\n\n if debug:\n print >> sys.stderr, 'Options: ', args\n\n #######################################################################\n #\n # MAIN PROCESSING\n #\n #######################################################################\n incorrect_file = next((x for x in args if not(os.path.isfile(x))), None)\n if incorrect_file != None:\n print >> sys.stderr, 'File', incorrect_file, 'not found'\n sys.exit(1)\n\n config_params['files'] = ' '.join(args)\n\n rule_manager.options = rule_manager.initial_config({})\n section_name = module_prefix\n try:\n rule_manager.options.add_section(section_name)\n except ConfigParser.DuplicateSectionError:\n pass\n\n for (vn, vv) in config_params.items():\n rule_manager.set_property(None, section_name, vn, vv,\n createRule = True, createOption = True)\n\n initialize(module_prefix)\n\n execute(module_prefix)", "def exec_python_script(self, filepath=False, script_txt=False):\n if script_txt is False and type(filepath) is str:\n with open(filepath, 'r') as file_:\n script_txt = file_.read()\n \n elif type(script_txt) is str and filepath is False:\n filepath = \"inline-script\"\n \n else:\n SystemError(\"'exec_python_script' function used incorrectly!\"\n +\" Choose either script_txt or filepath\")\n\n # Declare all the variables in the global scope so the user can use them\n _vars = {var_name: getattr(self, var_name) for var_name in self.variables}\n\n\n # Run the script in a try loop\n try:\n exec(script_txt, _vars)\n except Exception as e:\n err_msg = repr(e)\n if hasattr(e, 'txt'):\n err_msg = \"Error in your python code.\\n\\n\"+f\"Script: {filepath}\" + \"\\n\"\n if hasattr(e, \"lineno\"):\n err_msg += f\"Bad Line: {e.text}\" + \"\\n\" + f\"Line Num: {e.lineno}\"\n err_msg += \"\\nError Msg: \" + f\"{e.msg}\"\n\n ltxt = script_txt.split(\"\\n\")\n if hasattr(e, \"lineno\"):\n ltxt[e.lineno-1] += \" <------- BAD LINE\"\n err_msg += \"\\n\\n\\n\\n\\n\\nPython Script:\\n\" + 
'\\n'.join(ltxt)\n\n self.print_error(err_msg)\n\n for var_name in _vars:\n setattr(self, var_name, _vars[var_name])\n if var_name not in self.variables: self.variables.append(var_name)", "def launch_builtin_module(cmd, args):\n\n launch_path = \"%s/core/cmds/%s.py\" % (utils.get_pydtf_dir(), cmd)\n\n return __launch_python_module(launch_path, cmd, args)", "def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))", "def do_run(self, args):\n logger.debug(\"do_run() was called\")\n\t\n parser = CrispyArgumentParser(description=self.do_run.__doc__, prog=\"run\")\n parser.add_argument(\"module\", metavar=\"<module>\", help=\"module name\")\n parser.add_argument(\"session_id\", metavar=\"<session id>\", help=\"session to run on\")\n parser.add_argument(\"arguments\", nargs=argparse.REMAINDER, metavar=\"<arguments>\", help=\"module arguments\")\n \n try:\n pargs = parser.parse_args(shlex.split(args))\n except MyParserException as e:\n print e\n return\n\n try:\n target = self.srv.get_client(int(pargs.session_id))\n except Exception as e:\n fprint.error(\"Session id should be an integer.\")\n return\n\n if not target:\n fprint.error(\"Improper session id.\")\n return\n\n try:\n mod = self.srv.get_module(pargs.module)(target)\n except Exception as me:\n fprint.error(\"Error loading \\\"{}\\\" module: {}\".format(pargs.module, me))\n return\n\n try:\n margs = mod.check_args(pargs.arguments)\n except MyParserException as e:\n print e\n return\n\n try:\n target.run_module(mod, margs)\n except Exception as e:\n fprint.error(\"Error running module: {}\".format(e))\n return", "def run_script(script_path, cwd='.'):\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n try:\n proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd) # nosec\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n f'Hook script failed (exit status: {exit_status})'\n )\n except OSError as err:\n if err.errno == errno.ENOEXEC:\n raise FailedHookException(\n 'Hook script failed, might be an empty file or missing a shebang'\n ) from err\n raise FailedHookException(f'Hook script failed (error: {err})') from err", "def run_exec_command(command):\n if not is_debug_environment():\n import cli\n return cli.cli(command)", "def test_load_simple_module():\n loader = Loader()\n main_fname = loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()", "def run_gdb_with_script(binary='', core='', pybefore=None, pyafter=None):\n pybefore = ([pybefore] if isinstance(pybefore, str) else pybefore) or []\n pyafter = ([pyafter] if isinstance(pyafter, str) else pyafter) or []\n\n command = ['gdb', '--silent', '--nx', '--nh']\n \n for cmd in pybefore:\n command += ['--eval-command', cmd]\n\n command += ['--command', 'gdbinit.py']\n\n if binary:\n command += [binary]\n\n if core:\n command += ['--core', core]\n\n for cmd in pyafter:\n command += ['--eval-command', cmd]\n\n command += 
['--eval-command', 'quit']\n\n print(\"Launching command: %s\" % command)\n output = subprocess.check_output(command, stderr=subprocess.STDOUT)\n\n # Python 3 returns bytes-like object so lets have it consistent\n output = codecs.decode(output, 'utf8')\n\n # The pwndbg banner shows number of loaded commands, it might differ between\n # testing environments, so lets change it to ###\n output = re.sub(r'loaded [0-9]+ commands', r'loaded ### commands', output)\n\n return output", "def main(raw_args):\n parser = argparse.ArgumentParser()\n parser.add_argument('--module-link')\n parser.add_argument('module_path', type=os.path.realpath)\n args = parser.parse_args(raw_args)\n\n return md_module(\n load_module_from_path(args.module_path), module_link=args.module_link)", "def launch_module(cmd, args, redirect=False):\n\n module_path = \"%s/%s\" % (DTF_MODULES_DIR, cmd)\n\n # If the caller explicitly asked to save stdout, lets do it.\n if redirect:\n captured_f = cStringIO.StringIO()\n\n with stdout_redirector(captured_f):\n\n if pm.is_bash_module(module_path):\n rtn = __launch_bash_module(module_path, args)\n else:\n rtn = __launch_python_module(module_path, cmd, args)\n\n out = captured_f.getvalue()\n captured_f.close()\n\n return out, rtn\n\n else:\n # If we are dealing with a bash script, just run and exit.\n if pm.is_bash_module(module_path):\n return __launch_bash_module(module_path, args)\n return __launch_python_module(module_path, cmd, args)", "def run_script(input_file, script_name, interpreter='python'):\r\n from paver.easy import sh\r\n from paver.path import path\r\n rundir = path(input_file).dirname()\r\n output_text = sh('cd %(rundir)s && %(interpreter)s %(script_name)s 2>&1' % vars(), capture=True)\r\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\r\n response += '\\n\\t'.join(output_text.splitlines())\r\n while not response.endswith('\\n\\n'):\r\n response += '\\n'\r\n return response", "def __launch_bash_module(module_path, args):\n\n cmd = list()\n\n # Build the command string\n cmd = [module_path] + args\n\n # Update the environment\n new_env = os.environ\n\n # These are used for sourcing\n new_env['DTF_LOG'] = DTF_INCLUDED_DIR + \"/dtf_log.sh\"\n new_env['DTF_CORE'] = DTF_INCLUDED_DIR + \"/dtf_core.sh\"\n\n # We need to be in TOP to get the serial.\n os.chdir(prop.TOP)\n\n # We want the serial to be already set\n serial = prop.get_prop('Info', 'serial')\n new_env['ANDROID_SERIAL'] = serial\n\n try:\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=new_env)\n except OSError:\n log.e(TAG, \"Unable to execute '%s'. 
Are the permission flags correct?\"\n % module_path)\n return -5\n\n lines_iterator = iter(popen.stdout.readline, b\"\")\n\n for line in lines_iterator:\n sys.stdout.write(line)\n\n return popen.returncode", "def test_script(self) -> None:\n main()", "def runScript(*args, **kwargs):\n env = os.environ.copy()\n env['PYTHONPATH'] = os.pathsep.join(sys.path)\n return chromium_utils.RunCommand(*args, env=env, **kwargs)", "def test_script(self):\n self.script(\"# script.py\\n\"\n \"a = 2\\n\"\n \"# other\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n self.assertEqual(script.type, \"script\")\n self.assertEqual(script.mode, \"w\")\n self.assertEqual(script.first_char_line, 1)\n self.assertEqual(script.first_char_column, 0)\n self.assertEqual(script.last_char_line, 3)\n self.assertEqual(script.last_char_column, 7)\n self.assertEqual(script.container_id, -1)\n\n script_block = self.metascript.code_blocks_store[script.id]\n self.assertEqual(script_block.code, \"# script.py\\na = 2\\n# other\")\n self.assertEqual(script_block.docstring, \"\")\n self.assertTrue(bool(script_block.code_hash))", "def run(filename, verbose, debug):\n pass", "def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())", "def PyHiew_ExecuteScript(script, g, strip_path = False):\r\n PY_COMPILE_ERR = None\r\n try:\r\n execfile(script, g)\r\n except Exception, e:\r\n PY_COMPILE_ERR = str(e) + \"\\n\" + traceback.format_exc()\r\n PY_COMPILE_ERR = PY_COMPILE_ERR.replace(\r\n script[:-len(os.path.basename(script))],\r\n '')\r\n if PYHIEW_SHOW_EXEC_ERRORS:\r\n MessageBox(PY_COMPILE_ERR)\r\n\r\n return PY_COMPILE_ERR", "def run_tasklet(tasklet_ast: ast.With, filename: str, gvars: Dict[str, Any], lvars: Dict[str, Any]):\n # Transform the decorated AST into working Python code (annotated with\n # the correct line locations so that debugging works)\n runnable_ast = TaskletRewriter().rewrite_tasklet(tasklet_ast)\n mod = ast.fix_missing_locations(runnable_ast)\n\n # Compile the transformed AST\n codeobj = compile(mod, filename, 'exec')\n\n # Run tasklet\n exec(codeobj, gvars, lvars)", "def test_run_as_module():\n from cutadapt import __version__\n\n with subprocess.Popen(\n [sys.executable, \"-m\", \"cutadapt\", \"--version\"], stdout=subprocess.PIPE\n ) as py:\n assert py.communicate()[0].decode().strip() == __version__", "def run_module(args, module_path, workspace, module_data):\n\n mod_path = module_path.replace('./', '')\n curr_path = os.getcwd()\n tfvar_path = module_path.replace('./components/', '')\n print(\"curr_path = {0}\".format(curr_path))\n print(\"DEBUG module_path = {0}\".format(module_path))\n module_name = module_path.split('/')[-1]\n print(\"DEBUG module_name = {0}\".format(module_name))\n\n key_config = \"\\\"key={0}/terraform.tfstate\\\"\".format(module_name)\n bucket_region_config = \"\\\"region={0}\\\"\".format(module_data[\"bucket_region\"])\n bucket_config = \"\\\"bucket={0}\\\"\".format(module_data[\"bucket\"])\n dynamodb_config = \"\\\"dynamodb_table={0}\\\"\".format(module_data[\"dynamodb\"])\n\n plan_output_file = \"plan.out\"\n tf_varfile = f\"{curr_path}/tfvars/{tfvar_path}/{workspace}.tfvars\"\n tf_varfile_common = f\"{curr_path}/tfvars/terraform.tfvars\"\n tf_varfile_tags = f\"{curr_path}/tfvars/core/taggings/{workspace}.tfvars\"\n backend_override = f\"{curr_path}/variables/config/backend_override.tf\"\n providers_override = 
f\"{curr_path}/variables/config/providers_override.tf\"\n\n softlinking_files(mod_path)\n\n remove_prev_run = f\"cd {module_path} && rm -f {plan_output_file} && rm -rf .terraform\"\n cp_override_cmd = f\"cd {module_path} && cp {backend_override} . && cp {providers_override} .\"\n\n tf_plan_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -out {plan_output_file} --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags}\"\n tf_plan_destroy_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform plan -destroy --var-file {tf_varfile} --var-file {tf_varfile_common} --var-file {tf_varfile_tags} -out {plan_output_file}\"\n tf_apply_cmd = f\"cd {module_path} && terraform workspace new {workspace} || terraform workspace select {workspace} && terraform apply {plan_output_file}\"\n tf_init_cmd = f\"cd {module_path} && terraform init --backend-config={key_config} --backend-config={bucket_region_config} --backend-config={dynamodb_config} --backend-config={bucket_config} && terraform workspace new {workspace} || terraform workspace select {workspace}\"\n print(tf_init_cmd) # let's leave this in\n\n os.system(remove_prev_run)\n os.system(cp_override_cmd)\n os.system(tf_init_cmd)\n\n if args.action.lower() == 'plan':\n # always auto approve 'plan' action\n os.system(tf_plan_cmd)\n elif args.action.lower() == 'plan-destroy':\n # always auto approve 'plan' action\n os.system(tf_plan_destroy_cmd)\n elif args.action.lower() == 'apply':\n if args.approve:\n # auto-approve flag enabled so skip user confirmation\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n os.system(tf_plan_cmd)\n # confirm with user first\n if user_confirmation(\"Sure you want to APPLY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")\n elif args.action.lower() == 'apply-destroy':\n if args.approve:\n os.system(tf_plan_cmd)\n os.system(tf_apply_cmd)\n else:\n # confirm with user first\n os.system(tf_plan_destroy_cmd)\n if user_confirmation(\"Sure you want to APPLY DESTROY {0}\".format(module_path)):\n os.system(tf_apply_cmd)\n else:\n print(\"User aborting...\")", "def run_example_local_cmd(example_module_name, example_argv):\n return run_example_local(example_module_name, example_argv)", "def _call(self,\n\t\tconsole = ExtendedConsole,\n\t\tcontext = None\n\t):\n\t\tsys.modules['__main__'] = self.module__main__\n\t\tmd = self.module__main__.__dict__\n\n\t\t# Establish execution context in the locals;\n\t\t# iterate over all the loaders in self.context and\n\t\tfor path, ldesc in self.context:\n\t\t\tltitle, loader, xpath = ldesc\n\t\t\trpath = xpath(path)\n\t\t\tli = loader(rpath)\n\t\t\tif li is None:\n\t\t\t\tsys.stderr.write(\n\t\t\t\t\t\"%s %r does not exist or cannot be read%s\" %(\n\t\t\t\t\t\tltitle, rpath, os.linesep\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\treturn 1\n\t\t\ttry:\n\t\t\t\tcode = li.get_code(rpath)\n\t\t\texcept:\n\t\t\t\tprint_exception(*sys.exc_info())\n\t\t\t\treturn 1\n\t\t\tself.module__main__.__file__ = getattr(\n\t\t\t\tli, 'get_filename', lambda x: x\n\t\t\t)(rpath)\n\t\t\tself.module__main__.__loader__ = li\n\t\t\ttry:\n\t\t\t\texec(code, md, md)\n\t\t\texcept:\n\t\t\t\te, v, tb = sys.exc_info()\n\t\t\t\tprint_exception(e, v, tb.tb_next or tb)\n\t\t\t\treturn 1\n\n\t\tif self.main == (None, None):\n\t\t\t# It's interactive.\n\t\t\tsys.argv = self.args or ['<console>']\n\n\t\t\t# Use readline if 
available\n\t\t\ttry:\n\t\t\t\timport readline\n\t\t\texcept ImportError:\n\t\t\t\tpass\n\n\t\t\tic = console(locals = md)\n\t\t\ttry:\n\t\t\t\tic.interact()\n\t\t\texcept SystemExit as e:\n\t\t\t\treturn e.code\n\t\t\treturn 0\n\t\telse:\n\t\t\t# It's ultimately a code object.\n\t\t\tpath, loader = self.main\n\t\t\tself.module__main__.__file__ = getattr(\n\t\t\t\tloader, 'get_filename', lambda x: x\n\t\t\t)(path)\n\t\t\tsys.argv = list(self.args)\n\t\t\tsys.argv.insert(0, self.module__main__.__file__)\n\t\t\ttry:\n\t\t\t\tcode = loader.get_code(path)\n\t\t\texcept:\n\t\t\t\tprint_exception(*sys.exc_info())\n\t\t\t\treturn 1\n\n\t\t\trv = 0\n\t\t\texe_exception = False\n\t\t\ttry:\n\t\t\t\tif context is not None:\n\t\t\t\t\twith context:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\texec(code, md, md)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\texe_exception = True\n\t\t\t\t\t\t\traise\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\texec(code, md, md)\n\t\t\t\t\texcept:\n\t\t\t\t\t\texe_exception = True\n\t\t\t\t\t\traise\n\n\t\t\texcept SystemExit as e:\n\t\t\t\t# Assume it's an exe_exception as anything ran in `context`\n\t\t\t\t# shouldn't cause an exception.\n\t\t\t\trv = e.code\n\t\t\t\te, v, tb = sys.exc_info()\n\t\t\t\tsys.last_traceback = (tb.tb_next or tb)\n\t\t\texcept:\n\t\t\t\tif exe_exception is False:\n\t\t\t\t\traise\n\t\t\t\trv = 1\n\t\t\t\te, v, tb = sys.exc_info()\n\t\t\t\tprint_exception(e, v, tb.tb_next or tb)\n\t\t\t\tsys.last_traceback = (tb.tb_next or tb)\n\n\t\t\treturn rv", "def execute(self, task, script, **kwargs):\n locals().update(kwargs)\n exec(script)", "def load_python_startup_script(name):\n\n try:\n return sys.modules[name]\n except KeyError:\n pass\n\n (fp, pathname, description) = imp.find_module(name)\n try:\n module = imp.load_module(name, fp, pathname, description)\n # Special to GPS: if the module has a on_gps_started function,\n # execute it\n module.on_gps_started('gps_started')\n except AttributeError:\n pass\n finally:\n\n if fp:\n fp.close()\n\n return module", "def setupModule(directory, moduleName, appendFile = None, includes = [\"*.py\"], excludes = []):\n\tmoduleHeader = '''#!/usr/bin/python\n# coding:utf-8\n# %(date)s\n\n# ____ ____ _ _ _ ____ ____ ____ _____ _ _ ___ _ ____ _____ \n# | __ \\| ___|| \\ / || | | __ \\| ___|| __ \\|_ _|| |_| | / _ \\ | | | ___||_ _|\n# | /| __| | \\/ || | | __ <| __| | / | | | _ |( |_| )| |_ | __| | | \n# |_|\\_\\|____||_|\\/|_||_| |____/|____||_|\\_\\ |_| |_| |_| \\___/ |___||____| |_| \n\nfrom sys import path\nfrom os import remove\n\n# Uudecode zipped module and write zip module\ntry:\n\tfrom base64 import decodebytes as decodestring \nexcept:\n\tfrom base64 import decodestring \nopen(\"%(moduleName)s.zip\",\"wb\").write(decodestring(b\"\"\"\n%(moduleContent)s\n\"\"\"))\n\n# Add zip archive module to PYTHONPATH\npath.insert(0, '%(moduleName)s.zip')\n\n# Add zip internal directory into PYTHONPATH to more easily import scripts between them\npath.insert(0, '%(moduleName)s.zip/%(moduleName)s_lib')\n\n# Import zip module\nfrom %(moduleName)s_lib import *\n\n# Remove zip module file : It is no longer useful\nremove (\"%(moduleName)s.zip\")\n'''\n\tfrom re import split as splitre, DOTALL\n\t\n\tmoduleFilename = moduleName + \".py\"\n\tmoduleContent = BytesIO()\n\tdate = getTimeString()\n\n\tzipDir(moduleContent, directory, includes, excludes + [moduleFilename], False, [[moduleName, moduleName+\"_lib\"]])\n\t\n\t# Uuencode zipped module \n\tmoduleContent = uuEncode(moduleContent.getvalue(), 8192)\n\t\n\t# Write python module\n\toutput = 
open(moduleFilename, \"w\")\n\toutput.write(moduleHeader%locals())\n\t\n\tif appendFile != None:\n\t\tif isString(appendFile):\n\t\t\tappendFile = [appendFile]\n\t\tfor file in appendFile:\n\t\t\tcontent = open(file,\"r\").read()\n\t\t\tspl = splitre(r\".*#<<<<(.*)#>>>>.*\", content, flags=DOTALL)\n\t\t\tif len(spl) > 1:\n\t\t\t\tcontent = spl[1]\n\t\t\toutput.write(content)\n\t\n\tprint (\"Module %s.py created\"%moduleName)\n\treturn moduleFilename", "def exercise(in_mod: tvm.IRModule, expected_mod: tvm.IRModule, reference_func, args):\n # Correctness\n rewrite_and_assert(in_mod, expected_mod)\n # Idempotence\n rewrite_and_assert(expected_mod, expected_mod)\n # The VM can compile and possibly even run the module\n if not (reference_func is None) and not (args is None):\n eval_and_assert(in_mod, reference_func, args)", "def exec_file(path: str, global_vars: Dict[str, Any]) -> None:\n with open(path) as file:\n exec(compile(file.read(), path, \"exec\"), global_vars) # pylint: disable=exec-used", "def open_script(script_path):\n pass", "def exec_main_py(git_folder):\n with cd(git_folder):\n run(\"python main.py\")", "def main():\n obj = UnityFilesystem()\n obj.perform_module_operation()", "def import_main(name):\n config.MAIN_MODULE_NAME = name\n return importlib.import_module(name)", "def run_script():\n # pylint: disable=unsupported-assignment-operation\n script_source.data['script'] = [inp_script.value]", "def load_module(name, path):\n loader = importlib.machinery.SourceFileLoader(name, path)\n module = types.ModuleType(loader.name)\n loader.exec_module(module)\n return module", "def script_test(path):\n log.info(\" ... EXECUTING {}\".format(str(path)))\n\n cmd = [sys.executable, str(path)]\n cp = subprocess.run(cmd, stderr=subprocess.PIPE)\n if cp.returncode:\n log.info(\" ... FAILED\")\n log.info(\" ___ TRACEBACK\")\n log.info(cp.stderr.decode(\"utf-8\") + \"\\n\\n\")\n return False\n else:\n log.info(\" ... 
PASSED\")\n return True", "def main() -> None:\n return AnsibleModule(\n argument_spec={\n \"data\": {\"default\": None},\n \"path\": {\"default\": None},\n \"file\": {\"default\": None},\n },\n )", "def execfile_(filepath: str, _globals: Any) -> None:\n with open(filepath, 'rb') as stream:\n source = stream.read()\n\n code = compile(source, filepath, 'exec')\n exec(code, _globals)", "def pyscript(fp, **context):\n try:\n exec fp in context\n except SystemExit:\n pass\n return context['response']", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main(args=None):\n\n args, _ = root_parser.parse_known_args(args=args)\n import_path = ENTRY_POINTS[args.test]\n module = import_module(import_path)\n main_fnc = getattr(module, \"main\")\n _check_main(main_fnc)\n if args.dry:\n return\n main_fnc()", "def run_built_executable(self, name, *args, **kw):\n raise NotImplementedError", "def run(ep=None):\n global simulator, program, startpoint, recorder\n if program is None:\n print \"no program is loaded\"\n return\n\n if ep is None:\n ep = startpoint\n\n __record(ep, run, ep, True)\n simulator.run(ep)\n startpoint = ep\n exec_hooks(run)\n arrows()", "def run_python_file(python, file_args, directives=None):\n args = []\n if directives:\n for directive in directives:\n args.extend(('-X', directive))\n args.extend(file_args)\n command = (\n \"import Cython.Build.BuildExecutable as bex; \"\n \"bex.DEBUG = False; \"\n \"bex.build_and_run({args!r})\"\n ).format(args=args)\n run_python(python, command)", "def main(verbose, debug, names):\n initialize(debug)\n\n echome(names)\n # click.echo(\"hello\")\n # see\n # https://www.brianthicks.com/post/2014/11/03/build-modular-command-line-tools-with-click/", "def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" % script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err" ]
[ "0.6864769", "0.6780037", "0.6562564", "0.65126973", "0.63618916", "0.62197036", "0.6204552", "0.6153162", "0.6135312", "0.61225456", "0.61223215", "0.61039793", "0.60877556", "0.6078428", "0.603614", "0.5902457", "0.5856509", "0.58559155", "0.5846937", "0.58052075", "0.5713502", "0.5711306", "0.57003784", "0.5679043", "0.5636366", "0.56026405", "0.5555395", "0.5517942", "0.5512342", "0.5512342", "0.5489458", "0.5486094", "0.54624146", "0.544211", "0.5430524", "0.5408675", "0.5405465", "0.5396972", "0.53928167", "0.5360285", "0.53580505", "0.53576803", "0.53422445", "0.53317356", "0.53210735", "0.5309054", "0.5298541", "0.52845377", "0.5282688", "0.52819437", "0.52743465", "0.5273007", "0.5254322", "0.52511984", "0.5235401", "0.52243847", "0.52180487", "0.52157754", "0.52060264", "0.5205324", "0.5195185", "0.5192692", "0.5192544", "0.5191972", "0.5185634", "0.51840836", "0.51741666", "0.5170735", "0.51623726", "0.51575094", "0.51556224", "0.5142425", "0.5131052", "0.5125997", "0.5089392", "0.508121", "0.5074855", "0.5067923", "0.50573057", "0.5037697", "0.5034441", "0.502398", "0.50182265", "0.50169146", "0.5010604", "0.5005967", "0.5003143", "0.5003143", "0.5003143", "0.5003143", "0.5003143", "0.5003143", "0.5003143", "0.5003143", "0.5003079", "0.5002837", "0.4991491", "0.49848953", "0.4980866", "0.49783054" ]
0.84551585
0
Takes an rng, returns a html form. Should be reworked.
def make_form(rng, root_name='ArchiveTransfer'): results = rng.to_form() inside = results[root_name] def make_input(value): """ depending on what is found in the rng, make the input""" what = value[0] if what.startswith('not editable'): what = what.replace('not editable:', '').replace("'", "\'") if what.startswith('attribute:value:'): what = what.replace('attribute:value:', '').replace("'", "\'") return what def walk_dict(target_dict, depth=1): """ walks through the dict, makes a form""" stuff = "" def metadata_in_name(target_string, values): """serioulsy""" return 0 in [target_string.find(value) for value in values] for rng_key, rng_val in sorted(target_dict.items(), key=lambda x: x[0]): if isinstance(rng_val, dict): cssclass = "" if metadata_in_name(rng_key, ['zeroOrMore', 'oneOrMore']): cssclass = "class='multiple'" clean_name = rng_key.replace('optional.', '').replace( 'oneOrMore.', '').replace('.data', '').replace( 'zeroOrMore.', '') stuff +="<div class=\"{0}\" >".format(clean_name) stuff += "<h{0} {2} rel='togglable' class=\"{3}_rel\">{1}<span class=\"nice-span glyphicon glyphicon-minus\"></span></h{0}>".format(depth, rng_key, cssclass, clean_name) stuff += "<div class='holder{}'>".format(depth) stuff += walk_dict(rng_val, depth + 1) else: def find_key(a_dict, key): """find keys""" for his_key, his_val in a_dict.items(): if isinstance(his_val, dict): found = find_key(his_val, key) if found: return [his_key] + found elif his_val == key: return [his_key] def make_input_name(value): """makes input name""" values = ['optional', 'value', 'oneOrMore', 'data', "zeroOrMore"] def strip_meta(this_string): """removes metadata""" wot = this_string.replace('optional', '').replace( 'oneOrMore', '').replace('.data', '').replace( 'zeroOrMore', '').replace('.', '') return wot ret = [strip_meta(tag) for tag in find_key(inside, value) if tag not in values] return ".".join(ret) stuff += "\n<div class=\"{0}\"><div style='font-weight:bold;'>{1}</div>".format( make_input_name(rng_val), ".".join(find_key(inside, rng_val))) def val_starts_with(base_string, strings): """ check if str startswith """ for the_string in strings: if base_string.startswith(the_string): return True if len(make_input(rng_val)) < 45: if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']): stuff += "<input class='selectable' value=\"{}\" style='width:87%' name=\"{}\" readonly>".format( make_input(rng_val), make_input_name(rng_val)) else: stuff += "<input class='selectable' value=\"{}\" style='width:87%' name=\"{}\">".format( "", make_input_name(rng_val)) else: if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']): stuff += "<textarea class='selectable' rows='8' cols='120' readonly name=\"{1}\">{0}</textarea>".format( make_input(rng_val), make_input_name(rng_val)) else: stuff += "<textarea class='selectable' rows='8' cols='120'>{0}</textarea>".format( "") stuff += "</div>" stuff+="</div>" stuff += "</div>" return stuff return walk_dict(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rng():\n pass", "def rng():\n pass", "def create_html(self):\n rows = self.check()\n htmlrows = \"\"\n for row in rows:\n data = self._format_row(row)\n htmlrows += data\n \n return self.TEMPLATE.format(content=htmlrows)", "def _repr_html_(self):\n return util.tree_sequence_html(self)", "def gen_html(\n conversations,\n height,\n width,\n title,\n other_speaker,\n human_speaker,\n user_icon,\n alt_icon,\n):\n html_str = f\"\"\"<html>\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n <title> {title} </title>\n <style type=\"text/css\">\n @media print{{\n @page{{ margin: 0; size: {str(width)}in {str(height)}in; }}\n }}\n ul{{\n list-style: none;\n }}\n .{other_speaker}_img_div{{\n display: inline-block;\n float: left;\n margin: 18px 5px 0px -25px;\n }}\n .{human_speaker}_img_div{{\n display: inline-block;\n float: right;\n margin: 18px 15px 5px 5px;\n }}\n .{other_speaker}_img{{\n content:url({alt_icon});\n }}\n .{human_speaker}_img{{\n content:url({user_icon});\n }}\n .{other_speaker}_p_div{{\n float: left;\n }}\n .{human_speaker}_p_div{{\n float:right;\n }}\n p{{\n display:inline-block;\n overflow-wrap: break-word;\n border-radius: 30px;\n padding: 10px 10px 10px 10px;\n font-family: Helvetica, Arial, sans-serif;\n }}\n .clear{{\n float: none;\n clear: both;\n }}\n .{other_speaker}{{\n background: #eee;\n float: left;\n }}\n .{human_speaker}{{\n background: #0084ff;\n color: #fff;\n float: right;\n }}\n .breaker{{\n color: #bec3c9;\n display: block;\n height: 20px;\n margin: 20px 20px 20px 20px;\n text-align: center;\n text-transform: uppercase;\n }}\n img{{\n border-radius: 50px;\n width: 50px;\n height: 50px;\n }}\n </style>\n</head>\n<body>\n{gen_convo_ul(conversations)}\n</body>\n</html>\n \"\"\"\n return html_str", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def convert_html():\n return", "def gen_html_output(strs,q):\n res = []\n res.append('<html>\\n')\n res.append('<head><title>SecPoint.com GoogleDB queries strings</title></head>\\n')\n res.append('<body>\\n')\n res.append('<p>Generated by: <a href=\"http://www.secpoint.com/\">SecPoint.com</a> GoogleDB tool</p>\\n')\n res.append('\\t<ul>\\n')\n for (x,v) in zip(strs,q):\n res.append('\\t\\t<li><a href=\"%s\">%s</a></li>\\n'%(v,x))\n res.append('\\t</ul>\\n')\n res.append('</body>\\n</html>')\n return res", "def rawHTMLrendered(self):", "def output_to_html(string_data):\n raise NotImplementedError(\"This function is not yet Implemented!\")", "def _html_repr(self):\n html = '<table id=%s>' % (self._id,)\n\n for row in range(self.rows):\n html += '<tr>'\n for col in range(self.columns):\n if row == 0 and self.header_row or col == 0 and self.header_column:\n tag = 'th'\n else:\n tag = 'td'\n html += '<%(tag)s id=%(id)s></%(tag)s>' % {\n 'tag': tag,\n 'id': self._get_cell_id(row, col),\n }\n html += '</tr>'\n html += '</table>'\n return html", "def _repr_html_(self):\n return self.data.to_html()", "def get_html_string(self, **kwargs):\n ...", "def __html__(self):\n return self.html", "def _repr_html_(self):\n return util.tree_html(self)", "def generate_html(self):\n html_text_1 = \"\"\"\n <div class=\"concept\">\n\n \t\t<div class=\"concept-title\">\n\n \t\t\t\t\"\"\" + self.title\n\n html_text_2 = 
\"\"\"\n \t\t</div>\n\n \t\t<div class=\"concept-description\">\n\n\t\t <p>\n\t\t\t\n \t\t \t\t\"\"\" + self.description + \"\"\" \n \n </p>\"\"\"\n\n html_text_3 = '''\n\n \t\t</div>\n\n </div>'''\n\n return html_text_1 + html_text_2 + html_text_3", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def _repr_html_(self):\n\n return self._repr__base(rich_output=True)", "def rst_to_html(input_string, source_path=None, destination_path=None,\n input_encoding='unicode', doctitle=1, initial_header_level=1):\n overrides = {'input_encoding': input_encoding,\n 'doctitle_xform': doctitle,\n 'initial_header_level': initial_header_level,\n # the next two are for security reasons, to prevent malicious\n # insertion of raw html code.\n 'file_insertion_enabled': False,\n 'raw_enabled': False,\n }\n parts = core.publish_parts(\n source=input_string, source_path=source_path,\n destination_path=destination_path,\n writer_name='html', settings_overrides=overrides)\n return parts['html_body']", "def get_html(self):\r\n pass", "def _repr_html_(self):\n return self.__repr__()", "def _repr_html_(self):\n return self.__repr__()", "def __html__(self):\n return str(self)", "def render_html(html_template, slides_src):\n return mako.template.Template(html_template, input_encoding='utf-8', output_encoding='utf-8').render(slides=slides_src)", "def render_to_html(raw):\r\n if not raw:\r\n return ''\r\n\r\n reg = re.finditer(r\"(^|(?<!\\\\))\\$(([^\\$]|\\\\\\$)*[^\\\\])\\$\", raw)\r\n\r\n # generate_html.js must be passed all the math text ask command line args. \r\n # The dollar signs get stripped in advanced because the shell will interpret \r\n # those as variables. The program will return each math object separated by\r\n # newlines. KaTeX doesn't understand actual dollar signs if they are\r\n # followed by another character (like x=\\$2), so add a space after those\r\n results = [(mat.start(2), \r\n mat.end(2), \r\n mat.group(2).strip().replace('\\\\$', '\\\\$ ')\r\n ) for mat in reg if mat]\r\n\r\n if results == []:\r\n return raw\r\n\r\n math_start_positions, math_end_positions, raw_math = zip(*results)\r\n\r\n # prepare the shell to get the LaTeX via a call to Node.js\r\n # the shell is not explicitly called so there's no danger of shell injection\r\n # The command `node` must be on the system path\r\n env = dict(os.environ)\r\n env['LC_ALL'] = 'en_US.UTF-8' # accept unicode characters as output\r\n try:\r\n p = subprocess.Popen([\r\n 'node', \r\n os.path.join(os.path.dirname(__file__), 'generate_html.js')] \r\n + list(raw_math),\r\n env=env, \r\n stdout=subprocess.PIPE, \r\n stderr=subprocess.PIPE)\r\n except (WindowsError, OSError):\r\n raise NodeError(\"Node.js is not on your system path.\")\r\n else:\r\n node_output, node_error = p.communicate()\r\n \r\n if node_error:\r\n raise NodeError(node_error)\r\n\r\n if six.PY3:\r\n node_output = node_output.decode('UTF-8')\r\n \r\n html_bits = node_output.strip('\\n').split('\\n')\r\n\r\n final = []\r\n loc = 0\r\n for index, code in enumerate(html_bits):\r\n # measurements are one off from the index of the math to eliminate the\r\n # dollar sign specifiers\r\n # KaTeX will handle HTML encoding for the math text, but regular text\r\n # must have HTML stripped out for security reasons.\r\n final.append(cgi.escape(raw[loc:math_start_positions[index]]\r\n .strip('$').replace('\\\\$', '$')))\r\n final.append(smart_unicode(code))\r\n loc = math_end_positions[index] + 1\r\n\r\n final.append(cgi.escape(raw[loc:].replace('\\\\$', '$')))\r\n return 
u''.join(final)", "def _repr_html_(self):\n import jinja2 # noqa\n\n call_result = self._get_call_result()\n\n id_result = str(id(self) + np.random.random()).replace(\".\", \"rr\")\n\n params = {\n \"result\": self,\n \"id_result\": id_result,\n \"call_result\": call_result,\n \"json_result\": json.dumps(self.json_, indent=2),\n }\n return jinja2.Template(RESULT_HTML_TEMPLATE).render(**params)", "def get_html(self):\r\n # TODO: why are there nested html tags here?? Why are there html tags at all, in fact?\r\n html = '<html><html>%s</html><html>%s</html></html>' % (\r\n self.mathstr, saxutils.escape(self.xml.tail))\r\n try:\r\n xhtml = etree.XML(html)\r\n except Exception as err:\r\n if self.system.DEBUG:\r\n msg = '<html><div class=\"inline-error\"><p>Error %s</p>' % (\r\n str(err).replace('<', '&lt;'))\r\n msg += ('<p>Failed to construct math expression from <pre>%s</pre></p>' %\r\n html.replace('<', '&lt;'))\r\n msg += \"</div></html>\"\r\n log.error(msg)\r\n return etree.XML(msg)\r\n else:\r\n raise\r\n return xhtml", "def create_form_html():\n data_file = os.path.join('data', 'data.csv')\n data = pd.read_csv(data_file, index_col=0)\n example1 = data.iloc[0, :178]\n example2 = data.iloc[4340, : 178]\n placeholder = ', '.join(example1.astype(str))\n example_str1 = textwrap.fill(placeholder, 80)\n example_str2 = textwrap.fill(', '.join(example2.astype(str)), 80)\n form_html = ('''\n <html><body>\n <h1>Binary classifier for Epileptic Seizure Recognition Data \n Set</h1>\n <h2>Please enter features for classification</h1>\n (178 integers, separated by commas)\n <form method=\"post\" action=\"\">\n <textarea name=\"query\" cols=\"80\" rows=\"10\">'''\n + placeholder\n + ''' </textarea>\n <input type=\"submit\">\n </form>\n <p> Example non-seizure data point:\n '''\n + example_str1\n + '''<p> Example seizure data point: '''\n + example_str2\n + '''</body></html>''')\n return form_html", "def render(self, text, apply_spammer_limits=False):\n if False:\n regex = re.compile(\"&(?!nbsp;)\");\n body = regex.sub( \"&amp;\", text )\n regex = re.compile(\"<(/?)([a-zA-Z]+?)( .*?)?/?>\")\n return regex.sub( htmltag_replace, body )\n return \"\"", "def htmlize(text):\n htmlized = markdown.markdown(\n text,\n output_format=\"xhtml5\", safe_mode=\"escape\",\n )\n htmlversion = htmltemplate.format(body=htmlized)\n return htmlversion", "def _repr_html_(self) -> str:\n output_html = self.template_base.render(context=self.context)\n return output_html", "def vhdl2html(expression):\n rules = (\n (r'\\b([a-zA-Z_][a-zA-Z0-9_]*[0-9]+)\\b',\n lambda match: '<span class=\"vhdlsig\">{0}</span>'.format(match.group(1))),\n (r'\\b(and|or|xor|not|AND|OR|XOR|NOT)\\b',\n lambda match: '<span class=\"vhdlop\">{0}</span>'.format(match.group(1).lower())),\n )\n for pattern, repl in rules:\n expression = re.sub(pattern, repl, expression)\n return expression", "def rest2html(s):\n return core.publish_string(s, writer=html_fragment_writer)", "def _repr_html_(self):\n return html_table(self)", "def get_html_string_representation(self):\n return self.map.get_root().render()", "def sqlhtml(input):\n output=sql(html(input))\n return output", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def html(self) -> SafeString:\n return format_html(self.__html__())", "def to_html(self, result_dir):\n png_path = self.png_path(result_dir)\n data_table = self.html_data_table()\n return \"XXX figure html\"", "def _repr_html_(self): # pragma: no cover\n return Utils.render_html('extent.html', extent=self)", "def 
GenerateTimeblockString(self, time_range):\r\n html_string = \"<tr><td align = center>%s</td>\" % time_range\r\n html_string += \"<td align = center>%s</td>\" % time_range.total\r\n html_string += \"<td align = center>%s</td>\" % time_range.canceled\r\n html_string += \"<td align = center>%s</td>\" % time_range.errors\r\n html_string += \"<td align = center>%s</td>\" % (\r\n self.FormatTimeString(time_range.AverageRigScanTime()))\r\n html_string += \"<td align = center>%s</td>\" % (\r\n self.FormatTimeString(time_range.AverageOperatorScanTime()))\r\n html_string += \"<td align = center>%s</td>\" % (\r\n self.FormatTimeString(time_range.AverageIdletime()))\r\n html_string += \"<td align = center>%s</td></tr>\\n\" % (\r\n self.FormatTimeString(time_range.TotalIdleTime()))\r\n\r\n return html_string", "def enml_to_html(enml):\n return normalize_enml(enml)", "def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&amp;')\n\t\tunsafe = string.replace(unsafe, '<', '&lt;')\n\t\treturn string.replace(unsafe, '>', '&gt;')", "def get_html(self):\r\n if self.template is None:\r\n raise NotImplementedError(\"no rendering template specified for class {0}\"\r\n .format(self.__class__))\r\n\r\n context = self._get_render_context()\r\n\r\n html = self.capa_system.render_template(self.template, context)\r\n\r\n try:\r\n output = etree.XML(html)\r\n except etree.XMLSyntaxError as ex:\r\n # If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>,\r\n # XML parser will raise exception, so wee fallback to html5parser, which will set empty \"\" values for such attrs.\r\n try:\r\n output = html5lib.parseFragment(html, treebuilder='lxml', namespaceHTMLElements=False)[0]\r\n except IndexError:\r\n raise ex\r\n\r\n return output", "def get_spans_html(self, spans, tokens):\n html_str = ''\n for start, end, type in spans:\n color = 'red' if type else 'black'\n span_tokens = tokens[start : end + 1]\n span_str = '<span style=\"color:{}\">{}</span> '.format(color, ' '.join(span_tokens))\n html_str += span_str\n return html_str", "def mdhtml_to_html(data_str):\n mdrenderer = mistune.Renderer()\n markdown = mistune.Markdown(renderer=mdrenderer)\n return markdown(data_str)", "def _gen_html(self):\n yield \"\\n<table border=%r summary='a table'>\\n\" % self.border\n header = self.header\n for row in self:\n yield \"<tr>\\n \"\n for el in row:\n if header:\n yield \"<th>%s</th> \" % el\n else:\n yield '<td bgcolor=\"%s\">%s</td> ' % \\\n (getattr(row, \"color\", self.color), el)\n yield \"\\n</tr>\\n\"\n header = False\n yield \"</table>\\n\"", "def expr2html(expression):\n rules = (\n (r',',\n r', '),\n (r'(dist|comb|mass|mass_inv|mass_trv|dist_orm|comb_orm|mass_inv_orm|mass_trv_orm)(\\{)([^\\}]*)(\\})',\n r'<span class=\"function\">\\1</span><span class=\"curl\">\\2</span>\\3<span class=\"curl\">\\4</span>'),\n (r'\\b(AND|OR|XOR|NOT)\\b',\n r'<span class=\"keyword\">\\1</span>'),\n )\n for pattern, repl in rules:\n expression = re.sub(pattern, repl, expression)\n return expression", "def convert_text_to_rouge_format(text, title=\"dummy title\"):\n sentences = text.split(\"\\n\")\n sent_elems = [\n \"<a name=\\\"{i}\\\">[{i}]</a> <a href=\\\"#{i}\\\" id={i}>\"\n \"{text}</a>\".format(i=i, text=sent)\n for i, sent in enumerate(sentences, start=1) if sent != '']\n html = \"\"\"<html>\n<head>\n<title>{title}</title>\n</head>\n<body bgcolor=\"white\">\n{elems}\n</body>\n</html>\"\"\".format(title=title, elems=\"\\n\".join(sent_elems))\n\n return html", "def get_html(self):\n 
html_str = ''\n # Input\n input_form = 'Written' if self.mode == constants.TN_MODE else 'Spoken'\n padding_multiplier = 1 if self.mode == constants.TN_MODE else 2\n padding_spaces = ''.join(['&nbsp;'] * padding_multiplier)\n input_str = f'<b>[Input ({input_form})]{padding_spaces}</b>: {self._input}</br>\\n'\n html_str += input_str + ' '\n # Target\n target_html = self.get_spans_html(self.target_spans, self.target_tokens)\n target_form = 'Spoken' if self.mode == constants.TN_MODE else 'Written'\n target_str = f'<b>[Target ({target_form})]</b>: {target_html}</br>\\n'\n html_str += target_str + ' '\n # Pred\n pred_html = self.get_spans_html(self.pred_spans, self.pred_tokens)\n padding_multiplier = 10 if self.mode == constants.TN_MODE else 11\n padding_spaces = ''.join(['&nbsp;'] * padding_multiplier)\n pred_str = f'<b>[Prediction]{padding_spaces}</b>: {pred_html}</br>\\n'\n html_str += pred_str + ' '\n # Classes\n padding_multiplier = 15 if self.mode == constants.TN_MODE else 16\n padding_spaces = ''.join(['&nbsp;'] * padding_multiplier)\n class_str = f'<b>[Classes]{padding_spaces}</b>: {self.classes}</br>\\n'\n html_str += class_str + ' '\n # Space\n html_str += '</br>\\n'\n return html_str", "def to_single_html(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('single_html')\n else:\n self.output('single_html')", "def to_html(self, data=None, **kwargs) -> str:\n html = self.create_container()\n return html", "def html_manual_format(string):\n return html_div(string, \"manualfmt\")", "def to_html(self, header = False):\n self.header = header\n return \"\".join(self._gen_html())", "def rng() -> int:", "def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} {torch.backends.cudnn.deterministic}\")\n\n return out", "def getHtml(self):\n if len(self.rows)<1:\n return ''\n if self.useTableSorter:\n if self.tableAttr:\n h = '<table %s>\\n' % self.tableAttr\n else:\n h = '<table class=\"tablesorter\">\\n'\n h += '<thead>\\n'\n h += self.rows[0]\n h += '\\n</thead><tbody>\\n'\n h += '\\n'.join(self.rows[1:])\n h += '\\n</tbody></table>\\n'\n else:\n h = '<table%s>\\n' % sep(self.tableAttr)\n h += '\\n'.join(self.rows)\n h += '\\n</table>\\n'\n return h", "def randrange_fmt(mode, char, obj):\n x = randrange(*fmtdict[mode][char])\n if char == 'c':\n x = bytes([x])\n if obj == 'numpy' and x == b'\\x00':\n x = b'\\x01'\n if char == '?':\n x = bool(x)\n if char == 'f' or char == 'd':\n x = struct.pack(char, x)\n x = struct.unpack(char, x)[0]\n return x", "def test_html_repr():\n repr_html = grid._repr_html_()\n assert repr_html is not None", "def __html__(self):\n if not self.hasArticle:\n return None\n\n if self.bbcode_is_active:\n return self._bbcodeAsHtml\n\n return self.html", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = 
string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def rng() -> int:\n ...", "def htmlise(s):\n return '<div><pre class=\"tablecell\">' + html.escape(s) + '</pre></div>'", "def epbunchlist2html(epbunchlist):\n def epbunch2html(epbunch):\n lines = epbunch.obj[:2]\n return '->'.join(lines)\n lines = [epbunch2html(epbunch) for epbunch in epbunchlist]\n return \", \".join(lines)", "def r(w,rangestart,rangeend):\r\n if w == 'r':\r\n print(random.random(rangestart , rangeend))\r\n if w == 'ri':\r\n print(random.randint(rangestart,rangeend))", "def html_sequence(seq_esc: \"Sequence\") -> str:\n items = (f\"<li>{htmlize(item)}</li>\" for item in seq_esc)\n return \"<ul>\\n\" + \"\\n\".join(items) + \"\\n</ul>\"", "def to_html(content):\n headers = content[0].keys()\n rows = (r.values() for r in content)\n return html_table(headers, rows)", "def get_html(html: str):\r\n WRAPPER = \"\"\"<div style=\"overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem\">{}</div>\"\"\"\r\n # Newlines seem to mess with the rendering\r\n html = html.replace(\"\\n\", \" \")\r\n return WRAPPER.format(html)", "def tohtml(body: Union[str, Iterable], model: Any) -> str:\n strbody: str = \"\"\n if isinstance(body, str):\n strbody = cast(str, body)\n else:\n lines = list(body)\n if len(lines) > 0:\n if hasattr(lines[0], 'tohtml'):\n strbody = lines[0].tohtml(lines)\n else:\n strbody = (\n '<table>'\n + ''.join(\n f\"<tr>{''.join(_build_elem(i) for i in j)}</tr>\"\n for j in body\n )\n + '</table>'\n )\n\n if strbody:\n for tpe in OPTIONS:\n strbody = tpe.replace(model, strbody)\n return strbody", "def get_rubric_html(self, system):\r\n if self.child_state == self.INITIAL:\r\n return ''\r\n\r\n rubric_renderer = CombinedOpenEndedRubric(system, False)\r\n rubric_dict = rubric_renderer.render_rubric(self.child_rubric)\r\n success = rubric_dict['success']\r\n rubric_html = rubric_dict['html']\r\n\r\n # we'll render it\r\n context = {\r\n 'rubric': rubric_html,\r\n 'max_score': self._max_score,\r\n }\r\n\r\n if self.child_state == self.ASSESSING:\r\n context['read_only'] = False\r\n elif self.child_state in (self.POST_ASSESSMENT, self.DONE):\r\n context['read_only'] = True\r\n else:\r\n # This is a dev_facing_error\r\n raise ValueError(\"Self assessment module is in an illegal state '{0}'\".format(self.child_state))\r\n\r\n return system.render_template('{0}/self_assessment_rubric.html'.format(self.TEMPLATE_DIR), context)", "def get_html_from_rst(rst):\n\n compiler = nikola.plugins.compile.rest.CompileRest()\n compiler.set_site(FakeSite())\n return compiler.compile_string(rst)[0]", "def html(template, **data):\n tmpl = template_loader.load(template)\n context = {}\n context_setup.dispatch(context)\n context.update(data)\n stream = tmpl.generate(**context)\n return stream", "def create_html(text, template, output):\n\n # TODO uncomment this for orginal DMP format (right now difficult with differing section sizes)\n #templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates/new\")\n templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n TEMPLATE_FILE = \"template_\" + template.lower() + \".html\"\n real_template = 
templateEnv.get_template(TEMPLATE_FILE)\n\n outputText = real_template.render(contact=text)\n html_file = open(output + \".html\", \"w\")\n html_file.write(outputText)\n html_file.close()\n\n return output + \".html\"", "def to_html(self, content, request, **parameters):\n raise NotImplementedError", "def create_html_string(sheet):\n html = ''\n\n title = unicode(sheet.get('title', '')).strip()\n html += '{}<br>'.format(title)\n\n # author = ''\n # html += '<em>Source Sheet by <a href=\"{}\">{}</a><em>'\n\n for source in sheet['sources']:\n if 'text' in source:\n english = unicode(source['text'].get('en', '')).strip()\n hebrew = unicode(source['text'].get('he', '')).strip()\n html += '{}<br>{}'.format(english, hebrew)\n elif 'outsideText' in source:\n html += unicode(source['outsideText']).strip()\n elif 'comment' in source:\n html += unicode(source['comment']).strip()\n html += '<br><br>'\n\n return html.encode('utf-8')", "def rng():\n return numpy.random.default_rng(564)", "def randrange_fmt(mode, char, obj):\n x = randrange(*fmtdict[mode][char])\n if char == 'c':\n x = bytes([x])\n if obj == 'numpy' and x == b'\\x00':\n # http://projects.scipy.org/numpy/ticket/1925\n x = b'\\x01'\n if char == '?':\n x = bool(x)\n if char == 'f' or char == 'd':\n x = struct.pack(char, x)\n x = struct.unpack(char, x)[0]\n return x", "def get_html(self, *args, **kwargs):\n return Text(self.get_data(*args, **kwargs), escape=False)", "def test_repr_html_(curve):\n html = curve._repr_html_()\n assert html[77] == '<'", "def get_outer_html(self):\n\n pass", "def _repr_html_(self):\n return \"<td><b>{0}</b></td><td>{1}</td>\".format(self.id, self.title)", "def _write_html_torso(self, recipient: str) -> str:\n torso: str = ''\n debt_statements: List = list()\n credit_statements: List = list()\n for currency, reimbs in self.reimbursement_matrices.items():\n debts = reimbs[recipient].dropna()\n credits = reimbs.loc[recipient].dropna()\n for creditor, credit in debts.iteritems():\n debt_statements.append(\n attach_tag_li(f'{creditor}, {credit:.2f} {currency}'))\n for debtor, debt in credits.iteritems():\n credit_statements.append(\n attach_tag_li(f'{debtor}, {debt:.2f} {currency}'))\n\n if len(debt_statements) == 1:\n torso += attach_tag_p('Please reimburse the following '\n + 'participant:') + '\\n' \\\n + attach_tag_ul(debt_statements[0])\n elif len(debt_statements) > 1:\n torso += attach_tag_p('Please reimburse the following '\n + 'participants:') + '\\n' \\\n + attach_tag_ul('\\n'.join(debt_statements))\n else:\n torso += attach_tag_p('You don\\'t have any payable '\n + 'reimbursements.')\n\n torso += '\\n'\n\n if len(credit_statements) == 1:\n torso += attach_tag_p('The following participant is obligated to '\n + 'reimburse you:') \\\n + attach_tag_ul(credit_statements[0])\n elif len(credit_statements) > 1:\n torso += attach_tag_p('The following participants are obligated '\n + 'to reimburse you:') \\\n + attach_tag_ul('\\n'.join(credit_statements))\n else:\n torso += attach_tag_p('You don\\'t have any receivable '\n + 'reimbursements.')\n\n return attach_tag_div(torso)", "def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore", "def txt_to_html(in_str):\n replace_list = {\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n \"\\n\": \"<br/>\",\n }\n for i in replace_list:\n in_str = re.sub(i, replace_list[i], in_str)\n return in_str", "def _repr_html_(self) -> str:\n html_template = \"\"\"\n <script src=\"{webcomponents_js}\"></script>\n <link rel=\"import\" href=\"{facets_html}\">\n 
<facets-dive id=\"dive_elem\" height=\"{height}\"></facets-dive>\n <script>\n document.querySelector(\"#dive_elem\").data = {data};\n </script>\"\"\"\n html = html_template.format(\n facets_html=FACETS_DEPENDENCIES['facets_html'],\n webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],\n data=self._data.to_json(orient='records'),\n height=self.height,\n )\n return html", "def render_html(self):\n return self.template.render(content=self.content, **self.styles)", "def reconstruct_harlowe_html(story_attribs, other_elems, passages):\n\n passages_html = '\\n'.join([str(passage_obj) for _, passage_obj in passages.items()])+'\\n'\n\n story_elem = etree.Element(_STORY_TAG, story_attribs)\n if other_elems:\n story_elem.extend(other_elems)\n\n story_html = etree.tostring(story_elem, encoding='unicode')\n\n # Add the passages_html in by hand, since adding it to an xml element would escape\n # all of the angle brackets, turning them into &lt; and &gt;\n before, sep, after = story_html.partition('</'+_STORY_TAG+'>')\n story_html = before+passages_html+sep+after\n\n return story_html", "def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != 'html':\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered", "def render_to_html(env_spec_list):\n if not env_spec_list:\n return []\n html_output = \"\"\n\n for env_spec_entry in env_spec_list:\n if env_spec_entry[\"choices\"] is None:\n ret_str = render_label(env_spec_entry)\n ret_str += render_input(env_spec_entry)\n else:\n ret_str = render_label(env_spec_entry)\n ret_str += f'<select id=\"env_spec_{env_spec_entry[\"name\"].lower()}\" name=\"{env_spec_entry[\"name\"].lower()}\">\\n'\n\n for choice in env_spec_entry[\"choices\"]:\n ret_str += render_choice(\n choice, choice == env_spec_entry[\"default_value\"]\n )\n ret_str += \"</select>\\n\"\n\n if env_spec_entry[\"comment\"] is not None:\n ret_str += f\"<small>{env_spec_entry['comment']}</small>\\n\"\n\n html_output += ret_str\n return html_output", "def toHTML(self):\n return oidutil.autoSubmitHTML(self.toFormMarkup())", "def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != \"html\":\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered", "def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != \"html\":\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered", "def get_html(self):\n if self.value == True:\n color = \"red\"\n else:\n color = \"lightgray\"\n result_str = self.value\n\n # Add a link to the species list\n if self.link is not None:\n result_str = '<a href=\"%s\">%s</a>' % (self.link, result_str)\n\n return '<td style=\"background: %s\">%s</td>' % (color, result_str)", "def get_html(self):\r\n html = '<section class=\"targeted-feedback-span\"><span>{}</span></section>'.format(etree.tostring(self.xml))\r\n try:\r\n xhtml = etree.XML(html)\r\n except Exception as err: # pylint: disable=broad-except\r\n if self.system.DEBUG:\r\n msg = \"\"\"\r\n <html>\r\n <div class=\"inline-error\">\r\n <p>Error {err}</p>\r\n <p>Failed to construct targeted feedback from <pre>{html}</pre></p>\r\n </div>\r\n </html>\r\n \"\"\".format(err=cgi_escape(err), html=cgi_escape(html))\r\n log.error(msg)\r\n return etree.XML(msg)\r\n else:\r\n raise\r\n return xhtml", 
"def _repr_html_(self):\n nb_ticks = 7\n delta_x = math.floor(self.width / (nb_ticks - 1))\n x_ticks = [(i) * delta_x for i in range(0, nb_ticks)]\n delta_val = delta_x * (self.vmax - self.vmin) / self.width\n val_ticks = [round(self.vmin + (i) * delta_val, 1) for i in range(0, nb_ticks)]\n\n return (\n f'<svg height=\"40\" width=\"{self.width}\">'\n + \"\".join(\n [\n (\n '<line x1=\"{i}\" y1=\"15\" x2=\"{i}\" '\n 'y2=\"27\" style=\"stroke:{color};stroke-width:2;\" />'\n ).format(\n i=i * 1,\n color=self.rgba_hex_str(\n self.vmin + (self.vmax - self.vmin) * i / (self.width - 1),\n ),\n )\n for i in range(self.width)\n ],\n )\n + '<text x=\"0\" y=\"38\" style=\"text-anchor:start; font-size:11px; font:Arial\">{}</text>'.format( # noqa\n self.vmin,\n )\n + \"\".join(\n [\n (\n '<text x=\"{}\" y=\"38\"; style=\"text-anchor:middle; font-size:11px; font:Arial\">{}</text>' # noqa\n ).format(x_ticks[i], val_ticks[i])\n for i in range(1, nb_ticks - 1)\n ],\n )\n + '<text x=\"{}\" y=\"38\" style=\"text-anchor:end; font-size:11px; font:Arial\">{}</text>'.format(\n self.width,\n self.vmax,\n )\n + '<text x=\"0\" y=\"12\" style=\"font-size:11px; font:Arial\">{}</text>'.format(\n self.caption,\n )\n + \"</svg>\"\n )", "def to_multiple_htmls(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_htmls')\n else:\n self.output('multiple_htmls')", "def get_html(self):\r\n goal_level = '{0}-{1}'.format(\r\n self.required_level,\r\n self.required_sublevel)\r\n\r\n showbasic = (self.show_basic_score.lower() == \"true\")\r\n showleader = (self.show_leaderboard.lower() == \"true\")\r\n\r\n context = {\r\n 'due': self.due,\r\n 'success': self.is_complete(),\r\n 'goal_level': goal_level,\r\n 'completed': self.completed_puzzles(),\r\n 'top_scores': self.puzzle_leaders(),\r\n 'show_basic': showbasic,\r\n 'show_leader': showleader,\r\n 'folditbasic': self.get_basicpuzzles_html(),\r\n 'folditchallenge': self.get_challenge_html()\r\n }\r\n\r\n return self.system.render_template('foldit.html', context)", "def get_html(self):\r\n context = self.get_context()\r\n html = self.system.render_template(\r\n '{0}/combined_open_ended.html'.format(self.TEMPLATE_DIR), context\r\n )\r\n return html", "def get_HTML(self) :\n\t\t# the final result is composed of two parts: the prompt and the results.\n\t\t# we can \"pseudo\" concatenate them with the jaxml \"+\" operator:\n\t\tif (self.__htmlfinal is not None) and (self.__temphtml is not None):\n\t\t\tself.__htmlfinal = self.__htmlfinal + self.__temphtml\n\t\treturn str(self.__htmlfinal)" ]
[ "0.5834163", "0.5834163", "0.5702302", "0.56189793", "0.5424083", "0.5400542", "0.5398129", "0.53365695", "0.53144455", "0.5303006", "0.5300281", "0.5285712", "0.5281219", "0.52759695", "0.52519524", "0.52497476", "0.52317435", "0.5196451", "0.5196451", "0.5191627", "0.516962", "0.51471925", "0.51471925", "0.5144673", "0.5063963", "0.50274295", "0.5024352", "0.50234747", "0.5014132", "0.50102776", "0.49876064", "0.49691612", "0.4962164", "0.49603474", "0.4957625", "0.49551052", "0.49452958", "0.4945117", "0.4945117", "0.49315974", "0.4901553", "0.48996797", "0.48767915", "0.48751575", "0.4860821", "0.4842005", "0.483468", "0.4834418", "0.48324156", "0.48226672", "0.4816397", "0.48109215", "0.4806483", "0.48006883", "0.47934315", "0.4789857", "0.47880107", "0.4785405", "0.47820327", "0.4776817", "0.47729158", "0.4770619", "0.47592124", "0.47579288", "0.47482294", "0.4744298", "0.47429442", "0.47424397", "0.4739037", "0.4733149", "0.47322166", "0.47299522", "0.471447", "0.47139654", "0.4707842", "0.47066656", "0.47035685", "0.47034883", "0.4691772", "0.46908653", "0.4689323", "0.4675123", "0.46702656", "0.4663764", "0.4657601", "0.46559405", "0.46552718", "0.46526858", "0.4648368", "0.46461526", "0.46432412", "0.46404773", "0.46404773", "0.46397668", "0.4629637", "0.46271735", "0.46224478", "0.4618076", "0.46177882", "0.46170855" ]
0.46454608
90
depending on what is found in the rng, make the input
def make_input(value): what = value[0] if what.startswith('not editable'): what = what.replace('not editable:', '').replace("'", "\'") if what.startswith('attribute:value:'): what = what.replace('attribute:value:', '').replace("'", "\'") return what
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rng():\n pass", "def rng():\n pass", "def __call__(self, mutant, rng):\n raise NotImplementedError", "def find_input():\n\n mat = [[5,0],[0,0],[0,0]]\n num_range = [0,2]\n sub = 3 #Number of pixels to be substituted.\n\n input = [mat, num_range, sub]\n\n return input", "def rng() -> int:", "def solution(s):", "def __init__(self, rng):\n self.rng = rng\n self.state = None", "def init_population(self, task):\n if task.max_iters != np.inf:\n total_candidates = task.max_iters\n elif task.max_evals != np.inf:\n total_candidates = task.max_evals\n else:\n total_candidates = 0\n self.candidates = []\n x = None\n for i in range(total_candidates):\n while True:\n x = task.lower + task.range * self.random(task.dimension)\n if not np.any([np.all(a == x) for a in self.candidates]):\n self.candidates.append(x)\n break\n\n x_fit = task.eval(self.candidates[0])\n return x, x_fit, {}", "def __init__(self, rng, low, high):\n\t\tself.rng = rng\n\t\tself.low = low\n\t\tself.high = high", "def rng() -> int:\n ...", "def only_choice(values):\r\n\r\n ''' Your solution here '''\r\n\r\n new_values = values.copy()\r\n\r\n for unit in unitlist:\r\n for digit in '123456789':\r\n dplaces = [box for box in unit if digit in values[box]]\r\n if len(dplaces) == 1:\r\n new_values[dplaces[0]] = digit\r\n return new_values", "def reconstruct_input(self, ix):", "def _merge_template_search(self, inputs):\n seq_dict = defaultdict(list)\n # flatten and permute\n for input_dic in inputs:\n for name, x in input_dic.items():\n if name == 'mask':\n seq_dict[name].append(x.flatten(1))\n else:\n seq_dict[name].append(\n x.flatten(2).permute(2, 0, 1).contiguous())\n # concatenate\n for name, x in seq_dict.items():\n if name == 'mask':\n seq_dict[name] = torch.cat(x, dim=1)\n else:\n seq_dict[name] = torch.cat(x, dim=0)\n return seq_dict", "def make_individual():\n pool = list(range(box_count))\n chromosome = [pool.pop(randrange(box_count))]\n (x, y) = (0, 0)\n for i in range(1, box_count):\n x += 1\n if x == col_count:\n x = 0\n y += 1\n candidates = set()\n else:\n candidates = successors[chromosome[i-1]].intersection(pool)\n chromosome.append(pool.pop(pool.index(choice(tuple(candidates))) if candidates else randrange(len(pool))))\n return Individual(evaluate(chromosome), chromosome)", "def build_initial(domain):\n return random_candidate_float(domain)", "def generate_possible_coords(starting,a_range,min_cell_distance): \n a_raw= np.arange(a_range[0]+starting,a_range[1]-starting+1,min_cell_distance)\n \n if len(a_raw) == 0:\n return a_raw\n \n if not check_if_range_filled(a_range,a_raw[-1], min_cell_distance):\n # put one more number on the end if the range is not filled\n a_raw= np.arange(a_range[0]+starting,a_range[1],min_cell_distance) \n\n return a_raw", "def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)", "def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} 
{torch.backends.cudnn.deterministic}\")\n\n return out", "def _generate_base_candidates(self, target_text):\n\n result_list = []\n tagged_text = tag(target_text)\n\n for i in range(1, 5):\n temp = []\n grams = find_ngrams(tagged_text, i)\n\n for gram in grams:\n phrase = \" \".join(list(map(lambda x: x[0], gram)))\n pos = \" \".join(list(map(lambda x: x[1], gram)))\n\n if pos in self.candidate_pattern:\n temp.append(phrase)\n\n result_list.append(temp)\n\n return result_list", "def get_ranx0(rng):\n xr = range(-100, 101)\n x1 = rng.choice(xr)\n x0 = (x1,)\n return x0", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def sample(self, rng, query_value=None):\n nodes = jnp.array(self.nodes)\n query_value = (\n jax.random.uniform(rng) if query_value is None else query_value)\n query_value *= self._total_priority()\n\n _, index, _ = jax.lax.fori_loop(0, self.depth, step,\n (query_value, 0, nodes))\n\n return np.minimum(index - self.low_idx, self.highest_set)", "def suggest(suggestions):\n weight_sum = sum(suggestions.values())\n prob_ranges = []\n lower_bound = 0.0\n\n # generate probability ranges\n for task, weight in suggestions.iteritems():\n upper_bound = lower_bound + weight / weight_sum\n prob_ranges.append((task, (lower_bound, upper_bound)))\n\n # update lower bound\n lower_bound = upper_bound\n\n rand_number = random.random()\n\n for task, (low, high) in prob_ranges:\n if low <= rand_number < high:\n return task\n\n raise AssertionError('Should not be here. O_O');", "def only_choice(values):\n for unit in unitlist:\n for d in '123456789':\n # array of boxes for the digit d\n destinations = [b for b in unit if d in values[b]]\n if len(destinations) == 1:\n values = assign_value(values, destinations[0], d)\n return values", "def new_tile(self):\r\n # replace with your code\r\n # complete search ....\r\n non_zero_count = 0;\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if self._grid_tile[row][col] == 0:\r\n non_zero_count += 1\r\n random_choice = random.randrange(0, non_zero_count)\r\n count = 0\r\n # another search ....\r\n generated_new_tile = False\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if generated_new_tile == False and self._grid_tile[row][col] == 0:\r\n if count != random_choice:\r\n count += 1 \r\n else:\r\n if random.randrange(0,100) < 10:\r\n self.set_tile(row, col ,4)\r\n else:\r\n self.set_tile(row, col ,2)\r\n generated_new_tile = True", "def genSubstitutions(molecule, fr, to):\n for m in re.finditer(fr, molecule):\n yield molecule[:m.start()] + to + molecule[m.end():]", "def source_input(env, \r\n number, \r\n counter,\r\n generation,\r\n generation_list_come,\r\n generation_list_wait,\r\n generation_list_begin,\r\n generation_list_finish,\r\n df_simtime,\r\n generation_list_name,\r\n g1_list_name): \r\n# global g1_list_name\r\n for i in range(number):\r\n if i == 0:\r\n t = generation_list_come[i]#到达时间服从指数分布,此处的t为间隔时间\r\n else:\r\n t = generation_list_come[i] - generation_list_come[i-1]\r\n yield env.timeout(t)\r\n serve_time = np.random.choice(df_simtime['sim_time'])#得到模拟数据\r\n # print(serve_time)\r\n c = document(env, \r\n g1_list_name[i], \r\n generation, \r\n counter, \r\n time_in_fac,\r\n generation_list_begin,\r\n generation_list_wait,\r\n generation_list_finish,\r\n serve_time,\r\n generation_list_name)\r\n env.process(c)", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in 
gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def scan(self, mask):", "def rng_fn_scipy(cls, rng, *args, **kwargs):", "def rng():\n return numpy.random.default_rng(564)", "def robot_result_handler(self, _: Result):\n # Make a choice for each variable\n output = self.opt_spec.get_sample_template()\n for path, (low, high) in zip(self.opt_spec.inputs, self.opt_spec.search_space):\n choices = np.linspace(low, high, self.opt_spec.points_per_axis)\n choice = np.random.choice(choices)\n output.inputs[path] = choice\n\n # Send it to the robot\n send_new_sample(output)\n return", "def projection(self, what='input'):\n from copy import copy, deepcopy\n\n new = Automaton()\n # TODO: use empty_copy() in order to\n # preserve on_duplicate_transition and future extensions.\n # for this, empty_copy would need a new optional argument\n # use_class=None ?\n\n if what == 'input':\n new.input_alphabet = copy(self.input_alphabet)\n elif what == 'output':\n new.input_alphabet = copy(self.output_alphabet)\n else:\n raise NotImplementedError\n\n state_mapping = {}\n for state in self.iter_states():\n state_mapping[state] = new.add_state(deepcopy(state))\n for transition in self.iter_transitions():\n if what == 'input':\n new_word_in = transition.word_in\n elif what == 'output':\n new_word_in = transition.word_out\n else:\n raise NotImplementedError\n new.add_transition((state_mapping[transition.from_state],\n state_mapping[transition.to_state],\n new_word_in, None))\n\n if what == 'output':\n states = [s for s in self.iter_final_states() if s.final_word_out]\n if not states:\n return new\n number = 0\n while new.has_state(('final', number)):\n number += 1\n final = new.add_state(('final', number))\n final.is_final = True\n for state in states:\n output = state.final_word_out\n new.state(state_mapping[state]).final_word_out = []\n new.state(state_mapping[state]).is_final = False\n new.add_transition((state_mapping[state], final, output, None))\n\n return new", "def _candidates(self, token):", "def create(seed, model, tokenizer, temp=0.5):\n\n dictionary = [\"\"] + list(tokenizer.index_word.values())\n start = np.array(tokenizer.texts_to_sequences(seed)).reshape(1, -1)\n if seed[0] == '<start>':\n output = [seed[-1]]\n else:\n output = seed[:]\n\n for _ in range(45):\n weights = reweight_distribution(model.predict(start), temperature=temp)\n word = np.random.choice(dictionary, size=1, p=weights[0, :])[0]\n if word == '<end>': \n if len(output) > 10:\n break\n else:\n continue\n output.append(word)\n start = np.append(start[0, 1:], tokenizer.texts_to_sequences([word])).reshape(1, -1)\n return \" \".join(output)", "def process(self):\n # Process value 1, 2, ..., 9 in order\n for val in range(1, 10):\n # For each row\n for x in range(0, 9):\n exist = False\n can_enter = []\n for y in range(0, 9):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append(y)\n\n # Put val if only one cell can do\n if not exist and len(can_enter) == 1:\n y = can_enter[0]\n self.put(x, y, val)\n \n # For each column\n for y in range(0, 9):\n exist = False\n can_enter = []\n for x in range(0, 9):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append(x)\n\n # Put val in only one cell 
can do\n if not exist and len(can_enter) == 1:\n x = can_enter[0]\n self.put(x, y, val)\n \n # For each block\n for bx in range(0, 3):\n for by in range(0, 3):\n exist = False\n can_enter = []\n for x in range(bx * 3, (bx + 1) * 3):\n for y in range(by * 3, (by + 1) * 3):\n if self.field[x][y] == val:\n exist = True\n if self.field[x][y] == -1 and val in self.choices[x][y]:\n can_enter.append((x, y))\n \n # Put val if only one cell can do\n if not exist and len(can_enter) == 1:\n x = can_enter[0][0]\n y = can_enter[0][1]\n self.put(x, y, val)", "def r(w,rangestart,rangeend):\r\n if w == 'r':\r\n print(random.random(rangestart , rangeend))\r\n if w == 'ri':\r\n print(random.randint(rangestart,rangeend))", "def apply_tournament_selection(individuals, tot_rounds: int):\r\n winner = random.choice(individuals)\r\n\r\n for i in range(0, tot_rounds-1):\r\n ind = random.choice(individuals)\r\n\r\n if ind.fitness.dominates(winner.fitness):\r\n winner = ind\r\n elif not winner.fitness.dominates(ind.fitness):\r\n if ind.fitness.crowding_dist < winner.fitness.crowding_dist:\r\n winner = ind\r\n\r\n return winner", "def rng():\n return np.random.default_rng()", "def __call__(self, args):\n if isinstance(self.transform, list) and len(self.transform) > 1:\n result = self.regions_mask[x, y]\n unique_regions = np.unique(result)\n for i in unique_regions:\n indices = result==i\n transform=self.get_forward_transform(i)\n result[indices]=transform(x[indices], y[indices])\n print('resut', result)\n return result\n else:\n return self.transform(x, y)", "def rng(x):\n\n\tm = 2**31 - 1\n\ta = 48271\n\tc = 0\n\treturn (a*x + c)%m", "def helper(self,result_collector,part_result,candidates,target,startfrom):\n\t\tif target==0:\n\t\t return part_result\n\t\tif target<0:\n\t\t return []\n\t\tpart_result.append(-1)\n\t\tfor i in xrange(startfrom,len(candidates)):\n\t\t if i>startfrom and candidates[i]==candidates[i-1]:\n\t\t continue\n\t\t newtarget=target-candidates[i]\n\t\t part_result[-1]=candidates[i]\n\t\t if newtarget<0:\n\t\t break\n\t\t if newtarget==0:\n\t\t result_collector.append(copy.deepcopy(part_result))\n\t\t break\n\t\t self.helper(result_collector,part_result,candidates,newtarget,i+1)\n\t\tdel part_result[-1]", "def pick(self, target: int) -> int:\n\t\tans = None\n cnt = 0\n for i, x in enumerate(self.nums): \n if x == target: \n cnt += 1\n if randint(1, cnt) == cnt: ans = i # prob 1/cnt\n return ans", "def __init__(self, lower_bound, upper_bound):\n # Todo: implement\n self.x = np.zeros(np.size(lower_bound))\n self.v = np.zeros(np.size(lower_bound))\n self.value = -inf\n self.evaluated = False\n i = 0\n while i < np.size(lower_bound):\n self.x[i] = random.uniform(lower_bound[i], upper_bound[i])\n self.v[i] = random.uniform(-(upper_bound[i] - lower_bound[i]), upper_bound[i] - lower_bound[i])\n i = i + 1\n self.best_position = self.x\n self.best_value = -inf", "def __call__(self, results):\n h, w, c = results['img'].shape\n n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)\n for _ in range(n_holes):\n x1 = np.random.randint(0, w)\n y1 = np.random.randint(0, h)\n index = np.random.randint(0, len(self.candidates))\n if not self.with_ratio:\n cutout_w, cutout_h = self.candidates[index]\n else:\n cutout_w = int(self.candidates[index][0] * w)\n cutout_h = int(self.candidates[index][1] * h)\n\n x2 = np.clip(x1 + cutout_w, 0, w)\n y2 = np.clip(y1 + cutout_h, 0, h)\n results['img'][y1:y2, x1:x2, :] = self.fill_in\n\n return results", "def additionalMatch(handIn, indx):", "def 
_process_input_seed(self):\n\n Tcmb = 2.72548 * u.K # 0.00057 K\n Tfir = 70 * u.K\n ufir = 0.2 * u.eV / u.cm ** 3\n Tnir = 5000 * u.K\n unir = 0.2 * u.eV / u.cm ** 3\n\n # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or 'CMB'\n if type(self.seed_photon_fields) != list:\n self.seed_photon_fields = self.seed_photon_fields.split('-')\n\n self.seeduf = {}\n self.seedT = {}\n self.seedisotropic = {}\n self.seedtheta = {}\n for idx, inseed in enumerate(self.seed_photon_fields):\n if isinstance(inseed, six.string_types):\n if inseed == 'CMB':\n self.seedT[inseed] = Tcmb\n self.seeduf[inseed] = 1.0\n self.seedisotropic[inseed] = True\n elif inseed == 'FIR':\n self.seedT[inseed] = Tfir\n self.seeduf[inseed] = (ufir / (ar * Tfir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n elif inseed == 'NIR':\n self.seedT[inseed] = Tnir\n self.seeduf[inseed] = (unir / (ar * Tnir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n else:\n log.warning('Will not use seed {0} because it is not '\n 'CMB, FIR or NIR'.format(inseed))\n raise TypeError\n elif type(inseed) == list and (len(inseed) == 3 or len(inseed) == 4):\n isotropic = len(inseed) == 3\n\n if isotropic:\n name, T, uu = inseed\n self.seedisotropic[name] = True\n else:\n name, T, uu, theta = inseed\n self.seedisotropic[name] = False\n self.seedtheta[name] = validate_scalar('{0}-theta'.format(name),\n theta, physical_type='angle')\n\n validate_scalar('{0}-T'.format(name), T, domain='positive',\n physical_type='temperature')\n self.seed_photon_fields[idx] = name\n self.seedT[name] = T\n if uu == 0:\n self.seeduf[name] = 1.0\n else:\n # pressure has same physical type as energy density\n validate_scalar('{0}-u'.format(name), uu,\n domain='positive', physical_type='pressure')\n self.seeduf[name] = (uu / (ar * T ** 4)).decompose()\n else:\n log.warning(\n 'Unable to process seed photon field: {0}'.format(inseed))\n raise TypeError", "def input_output(self, input):\n for l in self.layers:\n p_h = l.input_output(input)\n input = (np.random.random_sample(l.output_size) < p_h) * 1\n return input", "def fit(self, data):\n self.seed = random.choice(range(100))", "def repair(self, x, rnd=rand):\n\t\tir = where(x < self.Lower)\n\t\tx[ir] = rnd.uniform(self.Lower[ir], self.Upper[ir])\n\t\tir = where(x > self.Upper)\n\t\tx[ir] = rnd.uniform(self.Lower[ir], self.Upper[ir])\n\t\treturn x", "def target_intersection(self, runid):\n\n def targeting(shuffledict, seg_copy_array, cell_name):\n bedstring = \"\"\n seg_counts_dict = defaultdict(int)\n breakpoint_counts = 0\n sum_counts = 0\n for cell in shuffledict:\n with suppress(IndexError):\n i = len(cell_name)\n cell_label = cell[:i]\n\n if not cell_name == cell_label:\n continue\n\n shuffled_list = shuffledict[cell]\n scipy.random.shuffle(shuffled_list)\n sum_counts += sum(shuffled_list)\n\n for i in range(len(shuffled_list)):\n if shuffled_list[i] == 0:\n continue\n\n breakpoint_counts += 1\n segment_index = i\n if i == 0:\n segment_index = 1\n\n chrm = seg_copy_array[seg_copy_array[:, 0] == segment_index][0, 1].decode()\n chrom_slice = seg_copy_array[seg_copy_array[:, 1] == chrm.encode()]\n chrom_seg_count = chrom_slice.shape[0]\n start_seg = segment_index\n stop_seg = segment_index+1\n\n # Prevents us from running past the end of the chromosome\n if segment_index+1 > chrom_seg_count:\n stop_seg = segment_index\n start_seg = segment_index-1\n\n coord_start = int(seg_copy_array[seg_copy_array[:, 0] == start_seg][0, 2])\n coord_stop = int(seg_copy_array[seg_copy_array[:, 0] == 
stop_seg][0, 3])\n\n segkey = \"{}.{}\".format(chrm, coord_start)\n seg_counts_dict[segkey] += 1\n bedstring += \"{0} {1} {2} {3} {0}|{1}|{2}|{3}\\n\".format(chrm, coord_start, coord_stop, \"x\")\n\n if eval(self.args.PairedBreakpoints):\n segment_index = shuffled_list[i]+i\n\n # Since segments are paired we can run past the end of the list.\n if segment_index > len(shuffled_list):\n segment_index = len(shuffled_list)-1\n\n # If the shuffle results in a segment overlap, skip it.\n if not shuffled_list[segment_index] == 0:\n continue\n\n start_seg = segment_index\n stop_seg = segment_index+1\n\n # Prevents us from running past the end of the chromosome by flipping direction of region\n if segment_index + 1 > chrom_seg_count:\n start_seg = shuffled_list[i] - i\n stop_seg = start_seg-1\n\n coor_start = int(seg_copy_array[seg_copy_array[:, 0] == start_seg][0, 2])\n coor_stop = int(seg_copy_array[seg_copy_array[:, 0] == stop_seg][0, 3])\n breakpoint_counts += 1\n segkey = \"{}.{}\".format(chrm, coord_start)\n seg_counts_dict[segkey] += 1\n bedstring += \"{0} {1} {2} {3} {0}|{1}|{2}|{3}\\n\".format(chrm, coor_start, coor_stop, \"x\")\n\n return bedstring, seg_counts_dict, breakpoint_counts\n\n encoded_cell_name = self.args.Cell_Name\n shuffle_dict = self.shuffle_dict_unpaired\n if eval(self.args.PairedBreakpoints):\n shuffle_dict = self.shuffle_dict_pairs\n output_data_dict = defaultdict(lambda: defaultdict(str))\n\n iteration_limit = int(self.args.Iteration_Count)/int(self.args.Spawn)\n iteration_count = 0\n while iteration_count < iteration_limit:\n if iteration_count % int(self.args.Prog_Check) == 0:\n self.log.info(\"Iteration: {} of {} for job {}\".format(iteration_count, iteration_limit, runid))\n\n bed_string, segment_count_dict, total_breakpoints = \\\n targeting(shuffle_dict, self.seg_analyzer.seg_copy_array, encoded_cell_name)\n\n # Bedtool Section.\n breakpoint_bedtool = pybedtools.BedTool(bed_string, from_string=True)\n target_bedtool = pybedtools.BedTool(self.args.Target_File, from_string=False)\n\n # Find target intersects for printing.\n breakpoint_target_intersect = breakpoint_bedtool.intersect(target_bedtool, wb=True, stream=True)\n\n \"\"\"\n The breakpoint target intersect pybedtools object is expected to have this structure;\n l[0] = Breakpoint chrom; l[1] = Breakpoint start coord; l[2] = Breakpoint end coord; \n l[3] = aberration copy type; l[4] = segment ID for internal tracking. The next items are from the target BED \n file. 
Make sure column 5 in that file is the target name.\n \"\"\"\n\n # Processing Breakpoint Intersects.\n intersect_dict = defaultdict(list)\n total_targeted_breakpoints = 0\n unique_targeted_breakpoints = 0\n\n for l in breakpoint_target_intersect:\n chrom = l[4].split(\"|\")[0]\n start = l[4].split(\"|\")[1]\n segment_key = \"{}.{}\".format(chrom, start)\n intersect_dict[segment_key].append(l[9])\n\n for k in intersect_dict:\n total_targeted_breakpoints += segment_count_dict[k]\n if segment_count_dict[k] > 0:\n unique_targeted_breakpoints += 1\n\n output_data_dict[iteration_count] = \"{}\\t{}\\t{}\\t{}\\n\"\\\n .format(total_breakpoints, total_targeted_breakpoints, len(segment_count_dict), len(intersect_dict))\n\n iteration_count += 1\n\n # Process data for output and write file.\n outstring = \"\"\n\n for k in output_data_dict:\n outstring += output_data_dict[k]\n\n permuted_shuffle_file_name = \\\n \"{}{}{}{}\".format(self.args.Working_Folder, self.args.Cell_Name, self.args.Job_Name, runid)\n permuted_shuffle_file = open(permuted_shuffle_file_name, 'w')\n permuted_shuffle_file.write(outstring)\n permuted_shuffle_file.close()\n\n return", "def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n min_length,\n do_sample,\n early_stopping,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n no_repeat_ngram_size,\n bad_words_ids,\n pad_token_id,\n eos_token_id,\n batch_size,\n num_return_sequences,\n length_penalty,\n num_beams,\n vocab_size,\n encoder_outputs,\n attention_mask,\n use_cache,\n model_specific_kwargs,\n ):\n\n # generated hypotheses\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)\n for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)\n\n # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times\n if do_sample is False:\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # cache compute states\n past = (encoder_outputs, None) if encoder_outputs is not None else None\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n while cur_len < max_length:\n model_inputs = self.model.prepare_inputs_for_generation(\n input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs\n )\n outputs = self.model(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)\n next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)\n\n # if model has past, then set the past variable to speed up decoding\n if self.model._use_cache(outputs, use_cache):\n past = outputs[1]\n if self.model.config.is_encoder_decoder and do_sample is False:\n # TODO (PVP) still a bit hacky here - there might be a better solution\n next_token_logits = self.model.adjust_logits_during_generation(\n next_token_logits, cur_len=cur_len, max_length=max_length\n )\n\n scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)\n\n scores = self.model.postprocess_next_token_scores(\n scores=scores,\n input_ids=input_ids,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n cur_len=cur_len,\n min_length=min_length,\n max_length=max_length,\n eos_token_id=eos_token_id,\n repetition_penalty=repetition_penalty,\n batch_size=batch_size,\n num_beams=num_beams,\n )\n\n assert 
scores.shape == (batch_size * num_beams, vocab_size), \"Shapes of scores: {} != {}\".format(\n scores.shape, (batch_size * num_beams, vocab_size)\n )\n\n if do_sample:\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # Temperature\n if temperature != 1.0:\n _scores = _scores / temperature\n # Top-p/top-k filtering\n _scores = top_k_top_p_filtering(\n _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together to sample from all beam_idxs\n _scores = _scores.contiguous().view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)\n probs = F.softmax(_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)\n # Compute next scores\n next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)\n # sort the sampled vector to make sure that the first num_beams samples are the best\n next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)\n next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)\n\n else:\n next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n next_scores = next_scores.view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence, add a pad token\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_id is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content, this will get added to next_batch_beam\n next_sent_beam = []\n\n # next tokens for this sentence\n for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(\n zip(next_tokens[batch_idx], next_scores[batch_idx])\n ):\n # get beam and token IDs\n beam_id = beam_token_id // vocab_size\n token_id = beam_token_id % vocab_size\n\n effective_beam_id = batch_idx * num_beams + beam_id\n # add to generated hypotheses if end of sentence\n if (eos_token_id is not None) and (token_id.item() == eos_token_id):\n # if beam_token does not belong to top num_beams tokens, it should not be added\n is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams\n if is_beam_token_worse_than_top_num_beams:\n continue\n generated_hyps[batch_idx].add(\n input_ids[effective_beam_id].clone(), beam_token_score.item(),\n )\n else:\n # add next predicted token since it is not eos_token\n next_sent_beam.append((beam_token_score, token_id, effective_beam_id))\n\n # once the beam for next step is full, don't add more tokens to it.\n if len(next_sent_beam) == num_beams:\n break\n\n # Check if we are done so 
that we can save a pad step if all(done)\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item(), cur_len\n )\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx + 1), \"We should have added num_beams each step\"\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_tokens = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch and update current length\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)\n cur_len = cur_len + 1\n\n # re-order internal states\n if past is not None:\n past = self.model._reorder_cache(past, beam_idx)\n\n # extend attention_mask for new generated input if only decoder\n if self.model.config.is_encoder_decoder is False:\n attention_mask = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # finalize all open beam hypotheses and add to generated hypotheses\n for batch_idx in range(batch_size):\n if done[batch_idx]:\n continue\n\n # test that beam scores match previously calculated scores if not eos and batch_idx not done\n if eos_token_id is not None and all(\n (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]\n ):\n assert torch.all(\n next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]\n ), \"If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}\".format(\n next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],\n )\n\n # need to add best num_beams hypotheses to generated hyps\n for beam_id in range(num_beams):\n effective_beam_id = batch_idx * num_beams + beam_id\n final_score = beam_scores[effective_beam_id].item()\n final_tokens = input_ids[effective_beam_id]\n generated_hyps[batch_idx].add(final_tokens, final_score)\n\n # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch\n output_batch_size = batch_size if do_sample else batch_size * num_return_sequences\n output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences\n\n # select the best hypotheses\n sent_lengths = input_ids.new(output_batch_size)\n best = []\n\n # retrieve best hypotheses\n for i, hypotheses in enumerate(generated_hyps):\n sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])\n for j in range(output_num_return_sequences_per_batch):\n effective_batch_idx = output_num_return_sequences_per_batch * i + j\n best_hyp = sorted_hyps.pop()[1]\n sent_lengths[effective_batch_idx] = len(best_hyp)\n best.append(best_hyp)\n\n # shorter batches are padded\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`Pad_token_id` has to be defined\"\n sent_max_len = min(sent_lengths.max().item() + 1, max_length)\n decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)\n\n # fill with hypothesis and eos_token_id if necessary\n for i, hypo in enumerate(best):\n decoded[i, : sent_lengths[i]] = hypo\n if sent_lengths[i] < max_length:\n 
decoded[i, sent_lengths[i]] = eos_token_id\n else:\n # none of the hypotheses have an eos_token\n assert (len(hypo) == max_length for hypo in best)\n # decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)\n decoded = torch.stack(best).type_as(input_ids)\n\n return decoded", "def search(values):\n\n\tif values is False:\n\t\treturn values\n\n\tvalues = reduce_puzzle(values)\n\n\tunsolved = [box for box in boxes if len(values[box]) > 1]\n\n\tif len(unsolved) == 0:\n\t\treturn values\n\t\n\tstart_box = unsolved[0]\n\n\tfor digit in values[start_box]:\n\t\tnew_values = values.copy()\n\t\tnew_values[start_box] = digit\n\t\tattempt = search(new_values)\n\t\t\n\t\tif attempt:\n\t\t\treturn attempt", "def localGenerateInput(self, model, oldInput):\n try:\n pt,weight = self.sparseGrid[self.counter-1]\n except IndexError:\n raise utils.NoMoreSamplesNeeded\n\n for v, varName in enumerate(self.sparseGrid.varNames):\n # compute the SampledVarsPb for 1-D distribution\n if self.variables2distributionsMapping[varName]['totDim'] == 1:\n for key in varName.strip().split(','):\n self.values[key] = pt[v]\n self.inputInfo['SampledVarsPb'][varName] = self.distDict[varName].pdf(pt[v])\n self.inputInfo['ProbabilityWeight-'+varName] = self.inputInfo['SampledVarsPb'][varName]\n # compute the SampledVarsPb for N-D distribution\n # Assume only one N-D distribution is associated with sparse grid collocation method\n elif self.variables2distributionsMapping[varName]['totDim'] > 1 and self.variables2distributionsMapping[varName]['reducedDim'] ==1:\n dist = self.variables2distributionsMapping[varName]['name']\n ndCoordinates = np.zeros(len(self.distributions2variablesMapping[dist]))\n positionList = self.distributions2variablesIndexList[dist]\n for varDict in self.distributions2variablesMapping[dist]:\n var = utils.first(varDict.keys())\n position = utils.first(varDict.values())\n location = -1\n for key in var.strip().split(','):\n if key in self.sparseGrid.varNames:\n location = self.sparseGrid.varNames.index(key)\n break\n if location > -1:\n ndCoordinates[positionList.index(position)] = pt[location]\n else:\n self.raiseAnError(IOError, f'The variables {var} listed in sparse grid collocation sampler, but not used in the ROM!' )\n for key in var.strip().split(','):\n self.values[key] = pt[location]\n self.inputInfo['SampledVarsPb'][varName] = self.distDict[varName].pdf(ndCoordinates)\n self.inputInfo['ProbabilityWeight-'+dist] = self.inputInfo['SampledVarsPb'][varName]\n\n self.inputInfo['ProbabilityWeight'] = weight\n self.inputInfo['PointProbability'] = reduce(mul,self.inputInfo['SampledVarsPb'].values())\n self.inputInfo['SamplerType'] = 'Sparse Grid Collocation'", "def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):\n if isinstance(surf_or_rad, dict):\n surf = surf_or_rad\n logger.info('Guess surface (%s) is in %s coordinates'\n % (_bem_explain_surface(surf['id']),\n _coord_frame_name(surf['coord_frame'])))\n else:\n radius = surf_or_rad[0]\n logger.info('Making a spherical guess space with radius %7.1f mm...'\n % (1000 * radius))\n surf = _get_ico_surface(3)\n _normalize_vectors(surf['rr'])\n surf['rr'] *= radius\n surf['rr'] += r0\n logger.info('Filtering (grid = %6.f mm)...' 
% (1000 * grid))\n src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,\n do_neighbors=False, n_jobs=n_jobs)\n # simplify the result to make things easier later\n src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],\n nuse=src['nuse'], coord_frame=src['coord_frame'],\n vertno=np.arange(src['nuse']))\n return SourceSpaces([src])", "def transform():", "def _generate_raw_environments(self, num, seed):", "def mutate(individual):\n for idx, q in enumerate(individual):\n rng = random.random()\n if rng < 1 / (len(individual)):\n pos = random.randint(1, len(individual))\n individual[idx] = pos\n return individual", "def create_fixed_generator(anchor_boxes, valid_indices,\n lower_threshold, upper_threshold,\n ratio=1., metric='iou', minibatch_size=256, seed=42):\n assert minibatch_size <= len(valid_indices), 'Minibatch length must be greater than valid regions number'\n assert metric in _metrics.keys(), 'Only available metrics are \\'iou\\', \\'positive_overlap\\' and \\'overlap\\''\n valid_ab = anchor_boxes[valid_indices]\n compute_metric = _metrics[metric](valid_ab)\n neg_samples = floor(minibatch_size / (1 + ratio))\n pos_samples = ceil(neg_samples * ratio)\n targets_shape = (len(anchor_boxes), 5)\n random_generator = np.random.default_rng(seed=seed)\n\n def targets_generator(gt_boxes):\n metrics, gt_boxes = compute_metric(gt_boxes)\n neg_ind = np.flatnonzero(metrics < lower_threshold)\n pos_ind = np.flatnonzero(metrics > upper_threshold)\n\n if len(neg_ind) > neg_samples:\n neg_ind = random_generator.choice(neg_ind, neg_samples, replace=False)\n elif len(neg_ind) < neg_samples:\n neg_ind = np.argpartition(metrics, neg_samples)[:neg_samples]\n if len(pos_ind) > pos_samples:\n pos_ind = random_generator.choice(pos_ind, pos_samples, replace=False)\n elif len(pos_ind) < pos_samples:\n pos_ind = np.argpartition(metrics, len(metrics) - pos_samples)[-pos_samples:]\n labels = np.full_like(metrics, -1, dtype='int')\n labels[pos_ind] = 1\n labels[neg_ind] = 0\n\n deltas = np.full_like(gt_boxes, 0, dtype='float')\n deltas[pos_ind] = compute_deltas(valid_ab[pos_ind], gt_boxes[pos_ind])\n\n targets = np.zeros(targets_shape, dtype='float')\n targets[:, 0] = -1\n targets[valid_indices] = np.hstack([labels[:, np.newaxis], deltas])\n # Since there is no way to give a loss function two tensors,\n # we have to make one, containing all required labels\n return targets\n return targets_generator", "def generate_guess_value(min_guess_range=1, max_guess_range=10):\n\treturn random.randrange(min_guess_range, max_guess_range), min_guess_range, max_guess_range", "def merge_ranges():", "def transform(a):\n if a not in rules:\n return a\n elif isnondet(rules[a]): \n return rand(rules[a])\n else:\n return rules[a]", "def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n repeat_ = num_expected // pos_inds.numel()\n return torch.cat((pos_inds.repeat(repeat_), self.random_choice(pos_inds, num_expected % pos_inds.numel())))\n else:\n return self.random_choice(pos_inds, num_expected)", "def sample_beam_search(self, features, states=None):\n sampled_ids = []\n inputs = features.unsqueeze(0)\n beam_size = 5\n candidates = []\n all_candidates = []\n for i in range(30): # maximum sampling length\n if i==0:\n hiddens, states = self.lstm(inputs, states) # (batch_size, 1, hidden_size), \n outputs = self.linear(hiddens.squeeze(1)) # 
(batch_size, vocab_size) \n predictions = torch.topk(outputs,beam_size)\n for k in range(beam_size):\n candidates.append([predictions[1][0][k], predictions[0][0][k].cpu().data.numpy()[0] , hiddens , states]) \n else: \n all_candidates = []\n for k in range(beam_size):\n candidate = candidates[k]\n inputs = self.embed(candidate[0][len(candidate[0])-1])\n inputs = inputs.unsqueeze(0)\n # print(inputs)\n hiddens, states = self.lstm(inputs, candidate[3]) # (batch_size, 1, hidden_size), \n outputs = self.linear(hiddens.squeeze(1)) # (batch_size, vocab_size) \n predictions = torch.topk(outputs,beam_size)\n for k in range(beam_size):\n new_candidate = [torch.cat((candidate[0],predictions[1][0][k]),0),candidate[1] + predictions[0][0][k].cpu().data.numpy()[0], hiddens, states]\n all_candidates.append(new_candidate)\n ordered = sorted(all_candidates, key=lambda tup:tup[1], reverse = True)\n candidates = ordered[:beam_size]\n sampled_ids = candidates[0][0]\n return sampled_ids.squeeze()", "def findRandomPlaceTo(place_from, destination_candidates):\n place_to_candidate = random.choice(destination_candidates)\n if place_to_candidate in place_from:\n return findRandomPlaceTo(place_from, destination_candidates)\n else:\n return place_to_candidate", "def solution(self) -> State:", "def output_input(self, output):\n for l in reversed(self.layers):\n p_v = l.output_input(output)\n output = (np.random.random_sample(l.input_size) < p_v) * 1\n return output", "def fill_in(formula):\n\tletters = ''.join(set(re.findall(r'[A-Z]', formula)))\n\tfor digits in itertools.permutations('1234567890', len(letters)):\n\t\ttable = string.maketrans(letters, ''.join(digits))\n\t\tyield formula.translate(table)", "async def rng(self, ctx, _range=1):\n content = rd.randint(0, int(_range))\n await ctx.send(content)\n self.logger.info(misolog.format_log(ctx, f\"range=[0,{_range}], result={content}\"))", "def RNAorDNA ( seq ) :\n\tif dna_regex . search ( seq ):\n\t\treturn RNA ( seq )\n\n\tif rna_regex . 
search ( seq ):\n\t\treturn DNA ( seq )", "def create_new_guess():\n next_choice = next(permutation_iterator) \n while inconsistent(next_choice, guesses):\n try:\n next_choice = next(permutation_iterator)\n except StopIteration:\n print(\"Error: Your answers were inconsistent!\")\n return ()\n return next_choice", "def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len):\n # print(\"rages\", tgt_start, tgt_end, src_start, src_end)\n if tgt_end < 0:\n return \n # If `src_align_idx` out of the `src_start` and `src_target`.\n for src_align_idx, tgt_align_idx in alignment:\n # target align point\n # sorce align point out of range\n if ((tgt_start <= tgt_align_idx <= tgt_end) and \n (src_align_idx < src_start or src_align_idx > src_end)): \n return\n phrase_set = set()\n ts = tgt_start # For increment\n while True:\n te = min(tgt_end, ts+max_phrase_len-1) # For decrement\n # te = tgt_end \n while True:\n # Add phrase pair (src_start, src_end, tgt_start, tgt_end)\n src_phrase = \" \".join(src_sent[i] for i in range(src_start,src_end+1))\n tgt_phrase = \" \".join(tgt_sent[i] for i in range(ts,te+1))\n phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase))\n te+= 1\n # Add phrase until `te` aligned or out of range\n if te in tgt_aligned or te == tgt_len:\n break\n ts-=1\n # Add phrase until `te` aligned or out of range\n if ts in tgt_aligned or ts < 0:\n break\n \n return phrase_set", "def default_mutate(search_space, rng, old_value, **kwargs):\n multiply_factor = kwargs.pop(\"multiply_factor\", 3.0)\n add_factor = kwargs.pop(\"add_factor\", 1)\n volatility = kwargs.pop(\"volatility\", 0.001)\n if search_space.type == \"real\":\n lower_bound, upper_bound = search_space.interval()\n factors = (\n 1.0 / multiply_factor\n + (multiply_factor - 1.0 / multiply_factor) * rng.random()\n )\n if lower_bound <= old_value * factors <= upper_bound:\n new_value = old_value * factors\n elif lower_bound > old_value * factors:\n new_value = lower_bound + volatility * rng.random()\n else:\n new_value = upper_bound - volatility * rng.random()\n elif search_space.type == \"integer\":\n print(search_space)\n lower_bound, upper_bound = search_space.interval()\n factors = int(add_factor * (2 * rng.randint(2) - 1))\n if lower_bound <= old_value + factors <= upper_bound:\n new_value = int(old_value) + factors\n elif lower_bound > old_value + factors:\n new_value = int(lower_bound)\n else:\n new_value = int(upper_bound)\n elif search_space.type == \"categorical\":\n # TODO: This ignores the probabilities passed to search space.\n # The mutation function should work directly at the search space level\n # instead of separately on each dimensions. 
This would make it possible\n # to sample properly the categorical dimensions.\n new_value = rng.choice(search_space.interval())\n else:\n print(search_space.type)\n new_value = old_value\n return new_value", "def RANSACHomography(xy_src, xy_ref, num_iter, tol):\n assert isinstance(xy_src, np.ndarray)\n assert isinstance(xy_ref, np.ndarray)\n assert xy_src.shape == xy_ref.shape\n assert xy_src.shape[1] == 2\n assert isinstance(num_iter, int)\n assert isinstance(tol, (int, float))\n tol = tol*1.0\n\n # START\n # 현재까지의 match 수의 최댓값을 기록할 변수 largest를 0으로 초기화합니다.\n largest = 0\n\n for _ in range(num_iter):\n # 우선 전체 match 중 무작위로 4개의 match를 뽑습니다.\n # 무작위 인덱스를 뽑은 뒤 해당 하는 keypoint를\n # xy_src, xy_ref에서 뽑아내는 방식으로 진행합니다.\n index_sample = random.sample(range(xy_ref.shape[0]),4)\n xy_src_sample = xy_src[index_sample]\n xy_ref_sample = xy_ref[index_sample]\n\n # homography를 구하기 위해 xy_src_sample와 xy_ref_sample의 좌표를 이용하여 행렬 A를 만들 것입니다.\n # 이 때 A를\n # [[1 1 1 0 0 0 -x1' -x1' -x1']\n # [0 0 0 1 1 1 -y1' -y1'-y1']\n # ...] ... (1)\n # 와\n # [[x1 y1 1 x1 y1 1 x1 y1 1]\n # [x1 y1 1 x1 y1 1 x1 y1 1]\n # ...] ... (2)\n # 의 원소끼리의 곱으로 볼 수 있기 때문에, 각각의 행렬을 먼저 계산한 다음 원소끼리 곱해주면\n # 원하는 행렬 A를 얻을 수 있을 것입니다.\n # \n\n # 위의 (1) 행렬을 만듭니다. 이 때 repeat, tile 등 원소를 복제하는 함수들을 사용하였습니다.\n xy_ref_modified = np.repeat(xy_ref_sample.reshape(-1,1),3, axis=1)*-1\n xy_ref_modified[0::2,0] = 1\n xy_ref_modified[1::2,0] = 0\n xy_ref_modified[1::2,1] = 1\n xy_ref_modified[0::2,1] = 0\n xy_ref_modified = np.repeat(xy_ref_modified, 3, axis=1)\n\n # 마찬가지로 (2) 행렬을 만듭니다. \n xy_src_modified = np.concatenate((xy_src_sample, xy_src_sample[:,1:]), axis=1)\n xy_src_modified[:,2] = 1\n xy_src_modified = np.repeat(xy_src_modified, 2, axis=0)\n xy_src_modified = np.tile(xy_src_modified, 3)\n\n # A 행렬을 만들고, A.T*A의 고유값을 구하기 위해 특이값 분해 함수 svd를 사용합니다.\n A = xy_ref_modified * xy_src_modified\n u,s,vt = np.linalg.svd(np.dot(A.T, A))\n\n # sigma(=고유값의 제곱근)가 가장 작은 인덱스를 찾은 후\n # 그에 해당하는 고유벡터를 vt에서 찾아내고, reshape으로 재형성합니다.\n # 이 3x3 행렬이 최종 homography의 후보입니다.\n h_cand = vt[s.argmin()].reshape((3,3))\n h_cand = h_cand / h_cand[2,2]\n\n\n well_proj_points = []\n # Part 2.3 에서 작성했던 projection 함수를 이용해 xy projection point를 얻어냅니다.\n xy_proj = KeypointProjection(xy_src, h_cand)\n\n # projection point와 reference 간의 거리를 모두 계산하여 배열로 만듭니다.\n point_diff = xy_proj - xy_ref\n proj_distances = np.hypot(point_diff[:,0:1], point_diff[:,1:])\n\n # 계산된 거리 중 tol 값보다 작은 경우의 수를 산정하고\n # 이것이 지금까지 나온 어떤 경우보다 많다면\n # largest를 현재 기록으로 갱신하고, 반환할 homography h는 현재 homography로 대체합니다.\n well_proj_points = xy_proj[np.where(proj_distances<=tol)]\n\n if(largest <= len(well_proj_points)):\n h = h_cand\n largest = len(well_proj_points)\n \n\n \n # END\n assert isinstance(h, np.ndarray)\n assert h.shape == (3, 3)\n return h", "def generate(self):\n start = self.nu_idx\n stop = start + self.score\n return self.orig_nu[start:stop]", "def train_generator(path, max_length):\n questions, answers, segment_ids = parse_data(path)\n randns = np.random.random((len(questions)))\n\n for que, ans, segment_id, randn in zip(questions, answers, segment_ids, randns):\n if randn < 0.34:\n input_id = que + ans\n if len(segment_id) - sum(segment_id) >= max_length:\n # 第一个句子长度大于max_length\n continue\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n attention_mask = create_attention_mask_for_seq(segment_id, input_mask)\n elif randn >= 0.34 and randn <= 0.67:\n input_id 
= que + ans\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n attention_mask = create_attention_mask_for_bi(input_mask)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n elif randn > 0.67 and randn <= 0.83:\n input_id = que + ans\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n attention_mask = create_attention_mask_for_lm(input_mask)\n else:\n input_id = que + ans\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n attention_mask = create_attention_mask_for_lm(input_mask, reverse=True)\n\n features = {'input_ids': input_id,\n 'input_mask': attention_mask,\n 'segment_ids': segment_id,\n 'masked_lm_positions': masked_positions,\n 'masked_lm_ids': masked_ids,\n 'masked_lm_weights': masked_weights}\n assert len(features['input_ids']) == len(features['input_mask']) == len(features['segment_ids']) == len(\n features['masked_lm_positions']) == len(features['masked_lm_ids']) == len(\n features['masked_lm_weights']) == max_length\n yield features", "def only_choice(values):\n for unit in unitlist:\n for digit in \"123456789\":\n boxes = [box for box in unit if digit in values[box]]\n if len(boxes) == 1:\n values[boxes[0]] = digit", "def grads(self, inputs):", "def sample(self, existing_results, num_samples):\n new_samples = set()\n existing_samples = list(existing_results.keys())\n existing_samples.sort()\n\n if self.max is not None and self.max not in existing_results and len(new_samples) < num_samples:\n new_samples.add(self.max)\n elif self.max is None and len(new_samples) < num_samples:\n new_samples.add(max(existing_samples) * 2)\n\n if self.min is not None and self.min not in existing_results and len(new_samples) < num_samples:\n new_samples.add(self.min)\n elif self.min is None and len(new_samples) < num_samples:\n new_samples.add(min(existing_samples) * 2)\n\n if (self.max is not None and self.min is not None and (self.max - self.min) / 2 not in existing_results and\n len(new_samples) < num_samples):\n new_samples.add(0.5 * (self.max - self.min))\n\n if len(existing_results) > 2 and len(new_samples) < num_samples:\n gradients = [(existing_results[existing_samples[i]] - existing_results[existing_samples[i-1]]) /\n (existing_samples[i] - existing_samples[i-1]) for i in range(1, len(existing_samples))]\n\n candidate_samples = []\n for i in range(1, len(existing_samples)):\n candidate_sample = 0.5 * (existing_samples[i] - existing_samples[i-1])\n gradient = gradients[i-1]\n if i > 2:\n score +=\n\n # Sort the candidate samples by score\n candidate_samples.sort(key=operator.itemgetter(1), reverse=True)\n for i in range(0, min(len(candidate_samples), ))\n\n return new_samples", "def map_addr_tree_app(s, d, tors):\n if len(tors) < 2:\n print('map_addr_tree: Error: len(tors) < 2')\n eee\n tors1 = tors[:] \n n = len(tors)\n s_out = random.randint(0, n-1) + 1\n s_out = tors[s_out-1]\n tors1.remove(s_out)\n\n d_out = 'e1'\n return s_out, d_out", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def _get_random_input_state(self, seed: int = None) -> None:\n\n random.seed(seed)\n size = self.circ_size\n # Determine how many qubits to set to the 1 state\n num_winners = random.randint(1, size)\n\n # Now that we know how many, which ones?\n winners = random.sample(range(size), num_winners)\n\n for winner in winners:\n self.x(winner)", "def process_input(self,r,g,b):\n pass", "def initialise_rng(self):\n\n\t\tself.rng = numpy.random.RandomState()", "def generate_rng_dict(base_rng):\n keys = ('dropout', 'stochastic_depth', 'rng')\n rngs = jax.random.split(base_rng, len(keys))\n return {key: rngs[i] for i, key in enumerate(keys)}", "def localGenerateInput(self, model, myInput):\n # create values dictionary\n weight = 1.0\n for key in sorted(self.distDict):\n # check if the key is a comma separated list of strings\n # in this case, the user wants to sample the comma separated variables with the same sampled value => link the value to all comma 
separated variables\n totDim = self.variables2distributionsMapping[key]['totDim']\n dist = self.variables2distributionsMapping[key]['name']\n reducedDim = self.variables2distributionsMapping[key]['reducedDim']\n weight = 1.0\n if totDim == 1:\n if self.samplingType == 'uniform':\n distData = self.distDict[key].getCrowDistDict()\n if ('xMin' not in distData.keys()) or ('xMax' not in distData.keys()):\n self.raiseAnError(IOError,\"In the Monte-Carlo sampler a uniform sampling type has been chosen;\"\n + \" however, one or more distributions have not specified either the lowerBound or the upperBound\")\n lower = distData['xMin']\n upper = distData['xMax']\n rvsnum = lower + (upper - lower) * randomUtils.random()\n # TODO (wangc): I think the calculation for epsilon need to be updated as following\n # epsilon = (upper-lower)/(self.limit+1) * 0.5\n epsilon = (upper-lower)/self.limit\n midPlusCDF = self.distDict[key].cdf(rvsnum + epsilon)\n midMinusCDF = self.distDict[key].cdf(rvsnum - epsilon)\n weight *= midPlusCDF - midMinusCDF\n else:\n rvsnum = self.distDict[key].rvs()\n for kkey in key.split(','):\n self.values[kkey] = np.atleast_1d(rvsnum)[0]\n self.inputInfo['SampledVarsPb'][key] = self.distDict[key].pdf(rvsnum)\n self.inputInfo['ProbabilityWeight-' + key] = 1.\n elif totDim > 1:\n if reducedDim == 1:\n if self.samplingType is None:\n rvsnum = self.distDict[key].rvs()\n coordinate = np.atleast_1d(rvsnum).tolist()\n else:\n coordinate = np.zeros(totDim)\n for i in range(totDim):\n lower = self.distDict[key].returnLowerBound(i)\n upper = self.distDict[key].returnUpperBound(i)\n coordinate[i] = lower + (upper - lower) * randomUtils.random()\n if reducedDim > len(coordinate):\n self.raiseAnError(IOError, \"The dimension defined for variables drew from the multivariate normal distribution is exceeded by the dimension used in Distribution (MultivariateNormal) \")\n probabilityValue = self.distDict[key].pdf(coordinate)\n self.inputInfo['SampledVarsPb'][key] = probabilityValue\n for var in self.distributions2variablesMapping[dist]:\n varID = utils.first(var.keys())\n varDim = var[varID]\n for kkey in varID.strip().split(','):\n self.values[kkey] = np.atleast_1d(rvsnum)[varDim-1]\n self.inputInfo[f'ProbabilityWeight-{dist}'] = 1.\n else:\n self.raiseAnError(IOError, \"Total dimension for given distribution should be >= 1\")\n\n if len(self.inputInfo['SampledVarsPb'].keys()) > 0:\n self.inputInfo['PointProbability'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())\n else:\n self.inputInfo['PointProbability'] = 1.0\n if self.samplingType == 'uniform':\n self.inputInfo['ProbabilityWeight' ] = weight\n else:\n self.inputInfo['ProbabilityWeight' ] = 1.0 # MC weight is 1/N => weight is one\n self.inputInfo['SamplerType'] = 'MonteCarlo'", "def match(self, inp):\n return 0", "def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n min_length,\n do_sample,\n early_stopping,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n no_repeat_ngram_size,\n bad_words_ids,\n pad_token_id,\n eos_token_id,\n batch_size,\n num_return_sequences,\n length_penalty,\n num_beams,\n vocab_size,\n encoder_outputs,\n attention_mask,\n use_cache,\n model_specific_kwargs,\n):\n # generated hypotheses\n eos_token_id = 198 # newline\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)\n for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, 
device=input_ids.device)\n\n # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times\n if do_sample is False:\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # cache compute states\n past = (encoder_outputs, None) if encoder_outputs is not None else None\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(\n input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs\n )\n outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)\n next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)\n\n # if model has past, then set the past variable to speed up decoding\n if self._use_cache(outputs, use_cache):\n past = outputs[1]\n if self.config.is_encoder_decoder and do_sample is False:\n # TODO (PVP) still a bit hacky here - there might be a better solution\n next_token_logits = self.adjust_logits_during_generation(\n next_token_logits, cur_len=cur_len, max_length=max_length\n )\n\n scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)\n\n scores = self.postprocess_next_token_scores(\n scores=scores,\n input_ids=input_ids,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n cur_len=cur_len,\n min_length=min_length,\n max_length=max_length,\n eos_token_id=eos_token_id,\n repetition_penalty=repetition_penalty,\n batch_size=batch_size,\n num_beams=num_beams,\n )\n\n assert scores.shape == (batch_size * num_beams, vocab_size), \"Shapes of scores: {} != {}\".format(\n scores.shape, (batch_size * num_beams, vocab_size)\n )\n\n if do_sample:\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # Temperature\n if temperature != 1.0:\n _scores = _scores / temperature\n # Top-p/top-k filtering\n _scores = top_k_top_p_filtering(\n _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together to sample from all beam_idxs\n _scores = _scores.contiguous().view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)\n probs = F.softmax(_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)\n # Compute next scores\n next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)\n # sort the sampled vector to make sure that the first num_beams samples are the best\n next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)\n next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)\n\n else:\n next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n next_scores = next_scores.view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n next_batch_beam = []\n\n # for each 
sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence, add a pad token\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_id is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content, this will get added to next_batch_beam\n next_sent_beam = []\n\n # next tokens for this sentence\n for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(\n zip(next_tokens[batch_idx], next_scores[batch_idx])\n ):\n # get beam and token IDs\n beam_id = beam_token_id // vocab_size\n token_id = beam_token_id % vocab_size\n\n effective_beam_id = batch_idx * num_beams + beam_id\n # add to generated hypotheses if end of sentence (eos or newline)\n if ((eos_token_id is not None) and (token_id.item() == eos_token_id)):\n # if beam_token does not belong to top num_beams tokens, it should not be added\n is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams\n if is_beam_token_worse_than_top_num_beams:\n continue\n generated_hyps[batch_idx].add(\n input_ids[effective_beam_id].clone(), beam_token_score.item(),\n )\n else:\n # add next predicted token since it is not eos_token\n next_sent_beam.append((beam_token_score, token_id, effective_beam_id))\n\n # once the beam for next step is full, don't add more tokens to it.\n if len(next_sent_beam) == num_beams:\n break\n\n # Check if we are done so that we can save a pad step if all(done)\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item(), cur_len\n )\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx + 1), \"We should have added num_beams each step\"\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_tokens = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch and update current length\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)\n cur_len = cur_len + 1\n\n # re-order internal states\n if past is not None:\n past = self._reorder_cache(past, beam_idx)\n\n # extend attention_mask for new generated input if only decoder\n if self.config.is_encoder_decoder is False:\n attention_mask = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # finalize all open beam hypotheses and add to generated hypotheses\n for batch_idx in range(batch_size):\n if done[batch_idx]:\n continue\n\n # test that beam scores match previously calculated scores if not eos and batch_idx not done\n if eos_token_id is not None and all(\n ((token_id % vocab_size).item() != eos_token_id) for token_id in next_tokens[batch_idx]\n ):\n assert torch.all(\n next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]\n ), \"If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: 
{}\".format(\n next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],\n )\n\n # need to add best num_beams hypotheses to generated hyps\n for beam_id in range(num_beams):\n effective_beam_id = batch_idx * num_beams + beam_id\n final_score = beam_scores[effective_beam_id].item()\n final_tokens = input_ids[effective_beam_id]\n generated_hyps[batch_idx].add(final_tokens, final_score)\n\n # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch\n output_batch_size = batch_size if do_sample else batch_size * num_return_sequences\n output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences\n\n # select the best hypotheses\n sent_lengths = input_ids.new(output_batch_size)\n best = []\n scores = []\n\n # retrieve best hypotheses\n for i, hypotheses in enumerate(generated_hyps):\n sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])\n for j in range(output_num_return_sequences_per_batch):\n effective_batch_idx = output_num_return_sequences_per_batch * i + j\n score, best_hyp = sorted_hyps.pop()\n sent_lengths[effective_batch_idx] = len(best_hyp)\n best.append(best_hyp)\n scores.append(score)\n\n scores = torch.exp(torch.tensor(scores))\n return best, scores", "def rand(jenni, input):\n if input.group(2) == \" \" or not input.group(2):\n jenni.reply(\"I'm sorry, but you must enter at least one number.\")\n else:\n random.seed()\n li_integers = input.group(2)\n li_integers_str = li_integers.split()\n if len(li_integers_str) == 1:\n li_integers_str = re.sub(r'\\D', '', str(li_integers_str))\n if len(li_integers_str) > 0:\n if int(li_integers_str[0]) <= 1:\n a = li_integers_str\n a = int(a)\n if a < 0:\n randinte = random.randint(a, 0)\n if a > 0:\n randinte = random.randint(0, a)\n else:\n a = li_integers_str\n a = int(a)\n randinte = random.randint(0, a)\n jenni.reply(\"your random integer is: \" + str(randinte))\n else:\n jenni.reply(\"lolwut\")\n else:\n ln = li_integers.split()\n if len(ln) == 2:\n a, b = ln\n a = re.sub(r'\\D', u'', a)\n b = re.sub(r'\\D', u'', b)\n if not a:\n a = 0\n if not b:\n b = 0\n a = int(a)\n b = int(b)\n if a <= b:\n randinte = random.randint(a, b)\n else:\n randinte = random.randint(b, a)\n jenni.reply(\"your random integer is: \" + str(randinte))\n else:\n jenni.reply(\"I'm not sure what you want me to do!\")", "def transform(\n self, src_mongodb_uri, src_database, src_collection, lower_bound, upper_bound\n ):\n if src_mongodb_uri != self.dst_mongodb_uri:\n self.clone_collection(src_mongodb_uri, src_database, src_collection)\n col = self.client[src_database][src_collection]\n for skill_start in col.find({\"status\": S_RUNNING}).sort(\"timestamp\", 1):\n for skill_end in (\n col.find(\n {\n \"skill_string\": skill_start[\"skill_string\"],\n \"thread\": skill_start[\"thread\"],\n \"timestamp\": {\"$gt\": skill_start[\"timestamp\"]},\n }\n )\n .sort(\"timestamp\", 1)\n .limit(1)\n ):\n if skill_end[\"status\"] == S_FINAL or skill_end[\"status\"] == S_FAILED:\n name, args = split_skill_string(skill_start[\"skill_string\"])\n lookup_entry = {\n \"_id\": {\n \"thread\": skill_start[\"thread\"],\n \"start_time\": skill_start[\"timestamp\"],\n \"end_time\": skill_end[\"timestamp\"],\n },\n \"outcome\": skill_end[\"status\"],\n \"error\": skill_end[\"error\"],\n \"name\": name,\n \"args\": args,\n \"duration\": time_diff_in_sec(\n skill_end[\"timestamp\"], skill_start[\"timestamp\"]\n ),\n }\n if (\n lookup_entry[\"duration\"] > 
upper_bound\n or lookup_entry[\"duration\"] < lower_bound\n ):\n print(\n \"duration out of bounds, omitting: {} seconds\\n{}\\n{}\".format(\n lookup_entry[\"duration\"], skill_start, skill_end\n )\n )\n else:\n if not self.lookup_col.find_one(lookup_entry):\n if not self.dry_run:\n self.lookup_col.insert_one(lookup_entry)\n print(\"Adding: {}\".format(lookup_entry))\n else:\n print(\"Entry already present, omitting\")\n if src_mongodb_uri != self.dst_mongodb_uri:\n self.client.drop_database(src_database)", "def sample_search(self):\n result = dict()\n for mutable in self.mutables:\n if isinstance(mutable, LayerChoice):\n gen_index = torch.randint(high=len(mutable), size=(1, ))\n result[mutable.key] = F.one_hot(gen_index, num_classes=len(mutable)).view(-1).bool()\n elif isinstance(mutable, InputChoice):\n if mutable.n_chosen is None:\n result[mutable.key] = torch.randint(high=2, size=(mutable.n_candidates,)).view(-1).bool()\n else:\n perm = torch.randperm(mutable.n_candidates)\n mask = [i in perm[:mutable.n_chosen] for i in range(mutable.n_candidates)]\n result[mutable.key] = torch.tensor(mask, dtype=torch.bool) # pylint: disable=not-callable\n return result", "def get_regions_mask(self, input):", "def rnd_choice(start, end, step, output_type=float):\n nums = np.append(np.arange(start, end, step), end)\n return output_type(np.random.choice(nums))", "def choose_from(seq, random_state):\n return seq[random_state.choice(len(seq))]", "def _candidate_generation(self):\n doc = self.nlp(self.text)\n named_entity_dict = {}\n named_entity_key_list = []\n named_entity_value_list = []\n entity_from_text_list = []\n offline_dic_list = []\n matched_element_list = []\n\n for ent in doc.ents:\n named_entity = (str(ent.text) + ':' + str(ent.label_))\n named_entity = (named_entity.split(':'))\n # named_entity_key = named_entity[0].replace('\\n', '')\n # named_entity_key_list.append(named_entity_key)\n # named_entity_value = named_entity[1].replace('\\n', '')\n # named_entity_value_list.append(named_entity_value)\n named_entity_value = named_entity[1].replace('\\n', '')\n named_entity_value_list.append(named_entity_value)\n filtered_words = (str(ent.text).split())\n filtered_words = [w for w in filtered_words if w.lower() not in self.english_stopwords]\n named_entity_key = [' '.join(filtered_words)]\n for i in named_entity_key:\n named_entity_key_list.append(i)\n for key in named_entity_key_list:\n named_entity_dict[key] = []\n i = 0\n for key in named_entity_key_list:\n named_entity_dict[key].append(named_entity_value_list[i])\n i = i + 1\n\n entities = \"ORG PERSON LOC GPE\".split()\n for entity in entities:\n entity_from_text = [k for k, v in named_entity_dict.items() if entity in v]\n for item in entity_from_text:\n entity_from_text_list.append(item)\n\n if not entity_from_text_list:\n self.logger.info('No named entity found in the input text')\n else:\n self.logger.info('Entities which are identified from the input sentence')\n self.logger.info(entity_from_text_list)\n\n for key, value in self.offline_dic.items():\n offline_dic_list.append(key)\n\n for item in entity_from_text_list:\n for item1 in offline_dic_list:\n if item == item1:\n matched_element_list.append(item)\n\n big_final_dict = []\n for i in matched_element_list:\n candidate_list = [v for k, v in self.offline_dic.items() if str(k) == str(i)]\n final_dict = dict(zip(i.split('\\n'), candidate_list))\n big_final_dict.append(final_dict)\n\n if not big_final_dict:\n self.logger.warning(\"No Match found in the KB\")\n return 
matched_element_list, None\n else:\n self.logger.info('found entities')\n return matched_element_list, big_final_dict", "def build_candidates(allowed_nodes=[identity], best_candidates=[], nb_candidates=200):\n new_candidates = []\n length_limit = 4 # Maximal length of a program\n def random_node():\n return random.choice(allowed_nodes)\n\n # Until we have enough new candidates\n while (len(new_candidates) < nb_candidates):\n # Add 10 new programs\n for i in range(5):\n new_candidates += [[random_node()]]\n\n # Create new programs based on each best candidate\n for best_program in best_candidates:\n # Add one op on its right but limit the length of the program\n if len(best_program) < length_limit - 1:\n new_candidates += [[random_node()] + best_program]\n # Add one op on its left but limit the length of the program\n if len(best_program) < length_limit - 1:\n new_candidates += [best_program + [random_node()]]\n # Mutate one instruction of the existing program\n new_candidates += [list(best_program)]\n new_candidates[-1][random.randrange(0, len(best_program))] = random_node()\n\n # Truncate if we have too many candidates\n np.random.shuffle(new_candidates)\n return new_candidates[:nb_candidates]", "def sample_selection(attr, old, new):\n if len(new) == 0:\n source.data = source.from_df(merged_data)\n else:\n samples = [s+1 for s in new]\n selected_data = merged_data.loc[merged_data['sample_num'].isin(samples)]\n source.data = source.from_df(selected_data)\n z = np.linspace(min(source.data['redshift']), max(source.data['redshift']), 100)\n cosmo_distmod_range = cosmo.distmod(z=z).value\n source.data['z_range'] = z\n source.data['cosmo_distmod_range'] = cosmo_distmod_range", "def _random_generate_bernstein_ ( fun , num ) :\n xmn = fun.xmin ()\n xmx = fun.xmax ()\n ymx = max ( fun.bernstein().pars() )\n i = 0 \n while i < num : \n x = _uniform_ ( xmn , xmx ) \n y = _uniform_ ( 0 , ymx )\n v = fun ( x )\n if v >= y :\n i+= 1 \n yield x", "def generate_grains(self, cells):\n\t\tfor cell_num in range(cells):\n\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\tsample_cell = sample_cell[0]\n\t\t\twhile sample_cell.state != 0:\n\t\t\t\trandom_row = random.randrange(0,self.space.shape[0],1)\n\t\t\t\tsample_cell = np.random.choice(self.space[random_row],1)\n\t\t\t\tsample_cell = sample_cell[0]\n\t\t\tsample_cell.change_state(self.init_time ,cell_num)", "def genRandTeam(nPos, totPlayers):\n # 0 1 2 3 4 5 6\n # nPos = [nFirst, nSecond, nThird, nShort, nCatcher, nOf, nDh]\n chromosome = []\n sum = 0\n count = 0\n\n\n for i in nPos: # general loop\n if count == 6: # when loop enters the nDh players it instead chooses from ALL positions five times\n for j in range(5): # to represent the 2 util positions and the 3 benches\n rNum = random.randint(0, totPlayers - 1) # random number of ANY player\n chromosome.append(rNum) # picks a random pos\n break # no more work needs to be done\n if count == 5: # this will occur before the previous loop; nOF must be iterated 3 times for 3 outfield spots\n for j in range(2):\n rNum2 = random.randint(0, i - 1)\n chromosome.append(rNum2 + sum) # nOF must be iterated 3 times for 3 outfield spots; i is on oF\n rNum3 = random.randint(0, i - 1)\n chromosome.append(rNum3 + sum)\n sum += i\n count += 1\n # first = random.randint(0,nPos[0])\n # second = random.randint(0,nPos[1])\n # third = random.randint(0,nPos[2])\n # short = random.randint(0,nPos[3])\n # catcher = random.randint(0,nPos[4])\n # of = 
[random.randint(0,nPos[5]), random.randint(0,nPos[5]), random.randint(0,nPos[5])] #THREE outfielders\n # rNum = [random.randint(0,6) for i in range(5)] #random numbers representing one of the nPos rosters\n # util = [random.randint(0,nPos[rNum[0]]), random.randint(0,nPos[rNum[1]])] #picks 2 random players from ANY roster\n # ben = [random.randint(0,nPos[rNum[2]]), random.randint(0,nPos[rNum[3]]), random.randint(0,nPos[rNum[4]])] # picks 3 random players form any roster\n # print first,second,third,short,catcher,of,util,ben\n # temp = Team()\n return chromosome", "def default_replacement(random, population, parents, offspring, args):\r\n return population" ]
[ "0.5973425", "0.5973425", "0.5430724", "0.5237212", "0.517664", "0.5133807", "0.5112024", "0.50218385", "0.4966752", "0.492342", "0.49195346", "0.48895872", "0.4850323", "0.47207487", "0.46716097", "0.46587282", "0.46179524", "0.4611999", "0.46064407", "0.46034813", "0.45879427", "0.45795736", "0.45778632", "0.4572321", "0.45676747", "0.45657754", "0.45627272", "0.45610777", "0.45556867", "0.4544816", "0.45427632", "0.4541876", "0.4537951", "0.4534259", "0.4520594", "0.44922152", "0.44901076", "0.44851187", "0.44847673", "0.4480729", "0.44804496", "0.44786793", "0.44717997", "0.44622338", "0.4457886", "0.44315934", "0.4431048", "0.4421817", "0.4405545", "0.44037127", "0.4396446", "0.43939453", "0.4390383", "0.43855658", "0.43807316", "0.43771204", "0.43749508", "0.43709907", "0.43687522", "0.43637982", "0.4363748", "0.43506098", "0.43463817", "0.43424562", "0.43423098", "0.4341249", "0.43341038", "0.4332925", "0.43321446", "0.43278217", "0.43271115", "0.43254414", "0.43223938", "0.43206742", "0.4319956", "0.43159655", "0.43129975", "0.43110636", "0.43109295", "0.43063074", "0.43056116", "0.4303392", "0.43025678", "0.42950073", "0.429406", "0.42892537", "0.428657", "0.4284965", "0.428143", "0.42791522", "0.42769912", "0.42755157", "0.4274642", "0.42678282", "0.42669648", "0.42651972", "0.42633384", "0.42629257", "0.42607704", "0.42558515", "0.42551422" ]
0.0
-1
walks through the dict, makes a form
def walk_dict(target_dict, depth=1):
    stuff = ""

    def metadata_in_name(target_string, values):
        """serioulsy"""
        return 0 in [target_string.find(value) for value in values]

    for rng_key, rng_val in sorted(target_dict.items(), key=lambda x: x[0]):
        if isinstance(rng_val, dict):
            cssclass = ""
            if metadata_in_name(rng_key, ['zeroOrMore', 'oneOrMore']):
                cssclass = "class='multiple'"
            clean_name = rng_key.replace('optional.', '').replace(
                'oneOrMore.', '').replace('.data', '').replace(
                'zeroOrMore.', '')
            stuff += "<div class=\"{0}\" >".format(clean_name)
            stuff += "<h{0} {2} rel='togglable' class=\"{3}_rel\">{1}<span class=\"nice-span glyphicon glyphicon-minus\"></span></h{0}>".format(depth, rng_key, cssclass, clean_name)
            stuff += "<div class='holder{}'>".format(depth)
            stuff += walk_dict(rng_val, depth + 1)
        else:
            def find_key(a_dict, key):
                """find keys"""
                for his_key, his_val in a_dict.items():
                    if isinstance(his_val, dict):
                        found = find_key(his_val, key)
                        if found:
                            return [his_key] + found
                    elif his_val == key:
                        return [his_key]

            def make_input_name(value):
                """makes input name"""
                values = ['optional', 'value', 'oneOrMore', 'data', "zeroOrMore"]

                def strip_meta(this_string):
                    """removes metadata"""
                    wot = this_string.replace('optional', '').replace(
                        'oneOrMore', '').replace('.data', '').replace(
                        'zeroOrMore', '').replace('.', '')
                    return wot
                ret = [strip_meta(tag) for tag in find_key(inside, value) if tag not in values]
                return ".".join(ret)

            stuff += "\n<div class=\"{0}\"><div style='font-weight:bold;'>{1}</div>".format(
                make_input_name(rng_val),
                ".".join(find_key(inside, rng_val)))

            def val_starts_with(base_string, strings):
                """ check if str startswith """
                for the_string in strings:
                    if base_string.startswith(the_string):
                        return True
            if len(make_input(rng_val)) < 45:
                if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']):
                    stuff += "<input class='selectable' value=\"{}\" style='width:87%' name=\"{}\" readonly>".format(
                        make_input(rng_val), make_input_name(rng_val))
                else:
                    stuff += "<input class='selectable' value=\"{}\" style='width:87%' name=\"{}\">".format(
                        "", make_input_name(rng_val))
            else:
                if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']):
                    stuff += "<textarea class='selectable' rows='8' cols='120' readonly name=\"{1}\">{0}</textarea>".format(
                        make_input(rng_val), make_input_name(rng_val))
                else:
                    stuff += "<textarea class='selectable' rows='8' cols='120'>{0}</textarea>".format(
                        "")
            stuff += "</div>"
        stuff += "</div>"
        stuff += "</div>"
    return stuff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_form(rng, root_name='ArchiveTransfer'):\n results = rng.to_form()\n inside = results[root_name]\n\n def make_input(value):\n \"\"\" depending on what is found in the rng, make the input\"\"\"\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what\n\n def walk_dict(target_dict, depth=1):\n \"\"\" walks through the dict, makes a form\"\"\"\n stuff = \"\"\n\n def metadata_in_name(target_string, values):\n \"\"\"serioulsy\"\"\"\n return 0 in [target_string.find(value) for value in values]\n\n for rng_key, rng_val in sorted(target_dict.items(), key=lambda x: x[0]):\n if isinstance(rng_val, dict):\n cssclass = \"\"\n if metadata_in_name(rng_key, ['zeroOrMore', 'oneOrMore']):\n cssclass = \"class='multiple'\"\n clean_name = rng_key.replace('optional.', '').replace(\n 'oneOrMore.', '').replace('.data', '').replace(\n 'zeroOrMore.', '')\n stuff +=\"<div class=\\\"{0}\\\" >\".format(clean_name)\n stuff += \"<h{0} {2} rel='togglable' class=\\\"{3}_rel\\\">{1}<span class=\\\"nice-span glyphicon glyphicon-minus\\\"></span></h{0}>\".format(depth, rng_key, cssclass, clean_name)\n stuff += \"<div class='holder{}'>\".format(depth)\n stuff += walk_dict(rng_val, depth + 1)\n else:\n def find_key(a_dict, key):\n \"\"\"find keys\"\"\"\n for his_key, his_val in a_dict.items():\n if isinstance(his_val, dict):\n found = find_key(his_val, key)\n if found:\n return [his_key] + found\n elif his_val == key:\n return [his_key]\n\n def make_input_name(value):\n \"\"\"makes input name\"\"\"\n values = ['optional', 'value',\n 'oneOrMore', 'data', \"zeroOrMore\"]\n\n def strip_meta(this_string):\n \"\"\"removes metadata\"\"\"\n wot = this_string.replace('optional', '').replace(\n 'oneOrMore', '').replace('.data', '').replace(\n 'zeroOrMore', '').replace('.', '')\n return wot\n ret = [strip_meta(tag) for tag in find_key(inside, value) if tag not in values]\n return \".\".join(ret)\n\n stuff += \"\\n<div class=\\\"{0}\\\"><div style='font-weight:bold;'>{1}</div>\".format(\n make_input_name(rng_val),\n \".\".join(find_key(inside, rng_val)))\n\n def val_starts_with(base_string, strings):\n \"\"\" check if str startswith \"\"\"\n for the_string in strings:\n if base_string.startswith(the_string):\n return True\n if len(make_input(rng_val)) < 45:\n if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']):\n stuff += \"<input class='selectable' value=\\\"{}\\\" style='width:87%' name=\\\"{}\\\" readonly>\".format(\n make_input(rng_val), make_input_name(rng_val))\n else:\n stuff += \"<input class='selectable' value=\\\"{}\\\" style='width:87%' name=\\\"{}\\\">\".format(\n \"\", make_input_name(rng_val))\n else:\n if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']):\n stuff += \"<textarea class='selectable' rows='8' cols='120' readonly name=\\\"{1}\\\">{0}</textarea>\".format(\n make_input(rng_val), make_input_name(rng_val))\n else:\n stuff += \"<textarea class='selectable' rows='8' cols='120'>{0}</textarea>\".format(\n \"\")\n stuff += \"</div>\"\n stuff+=\"</div>\"\n stuff += \"</div>\"\n return stuff\n return walk_dict(results)", "def buildDict(self, dict):\n for word in dict:\n self.add(word)", "def dict_form2(fac_data):\n #sort through last names\n #create key for each last name\n #if key exists, add list of values [degree, title -'of biostatistics', email]\n \n form_dict = {}\n\n for i in 
fac_data:\n #get name\n split_name = i['name'].split(\" \")\n last_name = split_name[len(split_name)-1]\n first_name = split_name[0]\n key = (last_name, first_name)\n \n #build array of degree/title/email\n fixed_title = i[' title'].strip(\" of Biostatistics\")\n \n info = []\n info.append(i[' degree'])\n info.append(fixed_title)\n info.append(i[' email'])\n \n #add to dictionary\n if key in form_dict:\n form_dict[key].append([info])\n else:\n form_dict[key] = info\n\n return form_dict", "def clean_form_dict(self, dict_):\n clean_dict = {}\n first_pdb_type, first_pdb_id, first_pdb_file = '', '', ''\n second_pdb_type, second_pdb_id, second_pdb_file = '', '', ''\n x1, y1, z1, x2, y2, z2 = '0', '0', '0', '0', '0', '0'\n degXY_1, degYZ_1, degXY_2, degYZ_2 = '0', '0', '0', '0'\n\n num_of_proteins = dict_.get('num_of_proteins')\n user_rand = dict_.get('user_rand')\n first_pdb_type = dict_.get('first_pdb_type')\n if first_pdb_type == 'by_id':\n first_pdb_id = dict_.get('first_pdb_id')\n first_pdb_file = ''\n elif first_pdb_type == 'by_file':\n first_pdb_id = ''\n first_pdb_file = dict_.get('first_pdb_file')\n\n if num_of_proteins == '2':\n second_pdb_type = dict_.get('second_pdb_type')\n if second_pdb_type == 'by_id':\n second_pdb_id = dict_.get('second_pdb_id')\n second_pdb_file = ''\n elif first_pdb_type == 'by_file':\n second_pdb_id = ''\n second_pdb_file = dict_.get('second_pdb_file')\n x2, y2, z2 = dict_.get('x2', 0), dict_.get('y2', 0), dict_.get('z2', 0)\n degXY_2, degYZ_2 = dict_.get('degXY_2', 0), dict_.get('degYZ_2', 0)\n\n x1, y1, z1 = dict_.get('x1', 0), dict_.get('y1', 0), dict_.get('z1', 0)\n degXY_1, degYZ_1 = dict_.get('degXY_1', 0), dict_.get('degYZ_1', 0)\n\n temperature_scale = dict_.get('temperature_scale', '')\n temperature = dict_.get('temperature', '')\n time_step_number = dict_.get('time_step_number', '')\n\n clean_dict['user_rand'] = user_rand\n clean_dict['num_of_proteins'] = num_of_proteins\n clean_dict['first_pdb_type'] = first_pdb_type\n clean_dict['first_pdb_id'] = first_pdb_id\n clean_dict['first_pdb_file'] = first_pdb_file\n clean_dict['second_pdb_type'] = second_pdb_type\n clean_dict['second_pdb_id'] = second_pdb_id\n clean_dict['second_pdb_file'] = second_pdb_file\n clean_dict['x1'] = x1\n clean_dict['y1'] = y1\n clean_dict['z1'] = z1\n clean_dict['x2'] = x2\n clean_dict['y2'] = y2\n clean_dict['z2'] = z2\n clean_dict['degXY_1'] = degXY_1\n clean_dict['degYZ_1'] = degYZ_1\n clean_dict['degXY_2'] = degXY_2\n clean_dict['degYZ_2'] = degYZ_2\n clean_dict['temperature_scale'] = temperature_scale\n clean_dict['temperature'] = temperature\n clean_dict['time_step_number'] = time_step_number\n\n return clean_dict", "def dict_form1(fac_data):\n #sort through last names\n #count number last names? 
maybe not\n #create key for each last name\n #if key exists, add list of values [degree, title -'of biostatistics', email]\n \n form_dict = {}\n\n for i in fac_data:\n #get name\n split_name = i['name'].split(\" \")\n last_name = split_name[len(split_name)-1]\n \n #build array of degree/title/email\n fixed_title = i[' title'].strip(\" of Biostatistics\")\n \n info = []\n info.append(i[' degree'])\n info.append(fixed_title)\n info.append(i[' email'])\n \n #add to dictionary\n if last_name in form_dict:\n form_dict[last_name].append([info])\n else:\n form_dict[last_name] = info\n\n return form_dict", "def convert_for_form(data):\n if \"name\" in data:\n data[\"full_name\"] = data[\"name\"].get(\"value\")\n try:\n data[\"given_names\"] = data[\"name\"].get(\n \"value\").split(\",\")[1].strip()\n except IndexError:\n data[\"given_names\"] = \"\"\n data[\"family_name\"] = data[\"name\"].get(\"value\").split(\",\")[0].strip()\n data[\"display_name\"] = data[\"name\"].get(\"preferred_name\")\n data[\"status\"] = data[\"name\"].get(\"status\", \"\").lower()\n if \"urls\" in data:\n data[\"websites\"] = []\n for url in data[\"urls\"]:\n if \"description\" not in url:\n data[\"websites\"].append({\"webpage\": url[\"value\"]})\n else:\n if url[\"description\"].lower() == \"twitter\":\n data[\"twitter_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"blog\":\n data[\"blog_url\"] = url[\"value\"]\n elif url[\"description\"].lower() == \"linkedin\":\n data[\"linkedin_url\"] = url[\"value\"]\n del data[\"urls\"]\n if \"field_categories\" in data:\n data[\"research_field\"] = data['field_categories']\n if \"positions\" in data:\n data[\"institution_history\"] = []\n for position in data[\"positions\"]:\n if not any(\n [\n key in position for key in ('name', 'rank',\n 'start_year', 'end_year')\n ]\n ):\n if 'email' in position:\n # Only email available, take as public_email\n data[\"public_email\"] = position.get(\"email\")\n continue\n pos = {}\n pos[\"name\"] = position.get(\"institution\", {}).get(\"name\")\n pos[\"rank\"] = position.get(\"rank\", \"\")\n pos[\"start_year\"] = position.get(\"start_date\", \"\")\n pos[\"end_year\"] = position.get(\"end_date\", \"\")\n pos[\"current\"] = True if position.get(\"status\") else False\n pos[\"old_email\"] = position.get(\"old_email\", \"\")\n if position.get(\"email\"):\n pos[\"email\"] = position.get(\"email\", \"\")\n if not data.get(\"public_email\"):\n data[\"public_email\"] = position.get(\"email\")\n data[\"institution_history\"].append(pos)\n data[\"institution_history\"].reverse()\n if 'advisors' in data:\n advisors = data['advisors']\n data['advisors'] = []\n for advisor in advisors:\n adv = {}\n adv[\"name\"] = advisor.get(\"name\", \"\")\n adv[\"degree_type\"] = advisor.get(\"degree_type\", \"\")\n data[\"advisors\"].append(adv)\n if \"ids\" in data:\n for id in data[\"ids\"]:\n try:\n if id[\"type\"] == \"ORCID\":\n data[\"orcid\"] = id[\"value\"]\n elif id[\"type\"] == \"BAI\":\n data[\"bai\"] = id[\"value\"]\n elif id[\"type\"] == \"INSPIRE\":\n data[\"inspireid\"] = id[\"value\"]\n except KeyError:\n # Protect against cases when there is no value in metadata\n pass", "def _get_normalized_form_data(self, form, key):\n data = {} if form.data else form.initial\n prefix = 'gc{}-'.format(key)\n\n for field_name in form.data:\n normalized_field_name = field_name[len(prefix):]\n\n if field_name in form.data and field_name.startswith(prefix) and form.data[field_name]:\n data[normalized_field_name] = form.data[field_name]\n\n for 
field_name in data:\n if field_name == 'quantity':\n data[field_name] = str(data[field_name])\n\n return data", "def _request_hash_map(hash_map, form):\n error_code = None\n old_name = hash_map['name']\n hash_map['name'] = form['name']\n hash_map['value'] = {}\n\n # Check that the name hash map has between 2 and 20 characters\n if not check.length(hash_map['name'], LENGTH_MIN_HASHMAP_NAME, LENGTH_MAX_HASHMAP_NAME):\n error_code = ('hash_table_msg', 'error_1')\n\n # Verify that the format of the name is correct\n elif not check.username(hash_map['name']):\n error_code = ('hash_table_msg', 'error_2')\n\n # Check that the name is new\n if error_code is None and old_name != hash_map['name']:\n hash_map_old = model.hash_table.find(name=hash_map['name'], only_one=True)\n if hash_map_old:\n error_code = ('hash_table_msg', 'error_5')\n\n # Get len label\n len_label = int(form[\"len\"])\n\n # I look for fields that contain the keys,\n # then I browse to the field until the larger number.\n for i in range(len_label):\n label_key = 'label-name-{}'.format(i)\n key = form[label_key].strip()\n\n # Check that the key has between 2 and 30 characters\n if not check.length(key, LENGTH_MIN_KEY, LENGTH_MAX_KEY):\n error_code = ('hash_table_msg', 'error_3')\n\n # Verify that the format of the key is correct\n elif not check.username(key):\n error_code = ('hash_table_msg', 'error_4')\n\n # It doesn't take into dictionary the empty keys\n if check.length(key, LENGTH_MIN_KEY, LENGTH_MAX_KEY):\n # Initial language values\n hash_map['value'][key] = {}\n\n for code, name in languages_object.all_lang_by_tuple:\n label_value = 'label-{}-{}'.format(code, i)\n\n value = form.get(label_value, \"\")\n hash_map['value'][key][code] = value\n return hash_map, error_code", "def render_form(obj):\n if obj:\n try:\n return ''.join([render_input(name, value) for name, value in obj.items()])\n except:\n pass\n try:\n fields = obj.split(',')\n return ''.join([render_input(name) for name in fields])\n except:\n pass\n return \"INVALID DATA FOR render_form: \" + str(type(obj)) + str(obj)\n\n return ''", "def buildDict(self, dict):\n for item in dict:\n length = len(item)\n if length not in self.dic:\n self.dic[length] = [item]\n else:\n self.dic[length].append(item)", "def crear():\n\n return dict(form=form)", "def create_report(self, form_data):\n R = {}\n for e in self.entries:\n R[e.name] = e.get_value(form_data)\n \n return(R)", "def buildDict(self, dict):\n for x in dict:\n self.EntireSet.append(x)\n print self.EntireSet", "def translate_from(original_form, source):\r\n # retrieve the correct translation dictionary\r\n source_dict = get_dict(source)\r\n # recreate the form with the translated values\r\n common_form = {}\r\n for key in source_dict.keys():\r\n if source_dict[key] in original_form.keys():\r\n common_form[key] = original_form[source_dict[key]]\r\n else:\r\n common_form[key] = \"\"\r\n return common_form", "def mapData(form, data, fromKeyFunc, toKeyFunc):\n rv = {}\n\n def visitItem(node):\n fromName = fromKeyFunc(node)\n toName = toKeyFunc(node)\n\n value = data.get(fromName, MISSING)\n if value != MISSING:\n rv[toName] = value\n\n def visit(node):\n if hasattr(node, 'items'):\n for item in node.items:\n visit(item)\n else:\n visitItem(node)\n\n visit(form)\n return rv", "def _write_dict(pdf, input_dict, indent=0, border=BORDER):\n for key, value in input_dict.items():\n pdf.set_font(\"arial\", \"B\", 14)\n pdf.cell(75, 5, key, border, 1, \"L\")\n pdf.cell(indent, 0)\n pdf.set_font(\"arial\", \"\", 12)\n 
pdf.multi_cell(150, 5, value, border, \"L\")\n pdf.ln(h=5)", "def _generate(self, custom_data: typing.Dict) -> typing.Dict:\n info = {}\n for field in self.fields:\n if field.name in custom_data:\n info[field.name] = custom_data[field.name]\n else:\n info[field.name] = field.generate(info)\n\n return info", "def make_from_clean_dict(dict):\n household = Household()\n for k, v in dict.items():\n if k == \"head\":\n household.__setattr__(k, Member.make_from_clean_dict(v))\n elif k == \"spouse\":\n household.__setattr__(k, Member.make_from_clean_dict(v))\n elif k == \"others\":\n newvals = [Member.make_from_clean_dict(d) for d in v]\n household.__setattr__(k, newvals)\n elif k == \"address\":\n household.__setattr__(k, Address.make_from_clean_dict(v))\n elif k == \"clean_json_string\":\n pass\n else:\n household.__setattr__(k, v)\n return household", "def buildDict(self, dict):\n for word in dict:\n self.s.add(word)\n self.length_set = set([len(word) for word in dict])", "def render_form(form):\n return {\n 'form': form,\n }", "def _form_data(self, response):\n SQFI_audit_type = response.xpath(self.filters[6]).extract_first()\n SQFI_audit_type_val = response.xpath(self.filters[7]).extract_first()\n food_sector_categories = response.xpath(self.filters[8]).extract_first()\n food_sector_categories_val = response.xpath(self.filters[9]).extract()\n audit_rating = response.xpath(self.filters[10]).extract_first()\n audit_rating_val = response.xpath(self.filters[11]).extract()\n country = response.xpath(self.filters[12]).extract_first()\n country_val = response.xpath(self.filters[13]).extract()\n form_data = {\n SQFI_audit_type: SQFI_audit_type_val,\n food_sector_categories: food_sector_categories_val,\n audit_rating: audit_rating_val,\n country: country_val,\n }\n return form_data", "def createFormatMap(self, form, renderable, **extras):\n\n fmtmap = renderable.__dict__.copy()\n fmtmap.update(extras)\n\n def replaceVars(match):\n\n try:\n var = match.group()[2:-1]\n if var and var.endswith(\":lexical\"):\n var = var[:-len(\":lexical\")]\n value = form.getFieldValue(var, lexical=True) or ''\n else:\n value = form.getFieldValue(var) or ''\n\n if not isinstance(value, str):\n if not hasattr(value, \"decode\"):\n value = str(value)\n value = value.decode('utf-8')\n return value\n except:\n return match.group()\n\n # process labels and hints\n if 'label' in fmtmap and fmtmap['label'] != None:\n fmtmap['label'] = VAREXP.sub(replaceVars, fmtmap['label'])\n if 'hint' in fmtmap and fmtmap['hint'] != None:\n fmtmap['hint'] = VAREXP.sub(replaceVars, fmtmap['hint'])\n if 'text' in fmtmap and fmtmap['text'] != None:\n fmtmap['text'] = VAREXP.sub(replaceVars, fmtmap['text'])\n if 'placeholder' in fmtmap and fmtmap['placeholder'] != None:\n fmtmap['placeholder'] = VAREXP.sub(replaceVars,\n fmtmap['placeholder'])\n\n # defaults\n extra_classes = {'relevant': True, 'required': False,\n 'readonly': False, 'error': False}\n\n # Let's see whether we got properties here...\n try:\n if hasattr(renderable, 'bind') and renderable.bind:\n # Requiredness\n if form.model.isRequired(renderable.bind, form.data):\n extra_classes[\"required\"] = True\n\n if not form.model.isRelevant(renderable.bind, form.data):\n extra_classes[\"relevant\"] = False\n\n # Read only\n if form.model.isReadonly(renderable.bind, form.data):\n extra_classes[\"readonly\"] = True\n\n elif hasattr(renderable, 'getRenderables') and \\\n callable(renderable.getRenderables):\n\n # Group relevance\n if not form.model.isGroupRelevant(renderable, form.data):\n 
extra_classes[\"relevant\"] = False\n\n except:\n pass\n\n if extras.get(\"errors\", None) and \\\n hasattr(renderable, 'bind') and renderable.bind and \\\n extras['errors'].get(renderable.bind, None):\n\n extra_classes['error'] = True\n\n if getattr(renderable, 'alert', ''):\n fmtmap['alert'] = renderable.alert\n else:\n fmtmap['alert'] = \"; \".join(extras['errors'][renderable.bind])\n\n else:\n\n fmtmap['alert'] = ''\n\n if \"extra_classes\" in fmtmap:\n fmtmap['extra_classes'] = \" \".join([fmtmap['extra_classes']] + \\\n [key for key in\n list(extra_classes.keys())\n if extra_classes[key]])\n else:\n fmtmap['extra_classes'] = \" \".join([key for key in\n list(extra_classes.keys()) if\n extra_classes[key]])\n\n fmtmap['type'] = self.getType(renderable)\n\n return fmtmap", "def dict(dict: Dict[str, Pin], /) -> None:", "def code_input(dict_, i):\n for key in dict_:\n dict_[key] = i + dict_[key]\n return dict_", "def get_processed_form_data(form, form_element_entries):\n keys_to_remove = get_ignorable_form_fields(form_element_entries)\n values_to_remove = get_ignorable_form_values()\n\n field_name_to_label_map = \\\n get_field_name_to_label_map(form, keys_to_remove, values_to_remove)\n\n keys_to_remove = list(field_name_to_label_map.keys())\n\n return (\n field_name_to_label_map,\n get_cleaned_data(form, keys_to_remove, values_to_remove)\n )", "def encodeToKVForm(self):\n return self.fields.toKVForm()", "def _flatten_dict(self, obj, prefix=''):\n\n encoded_dict = QueryDict('').copy()\n\n if hasattr(obj, 'items'):\n for key, value in obj.items():\n\n item_key = '%(prefix)s%(key)s' % { 'prefix': prefix, 'key': key }\n\n # Flatten lists for formsets and model choice fields\n if isinstance(value, list):\n for i, item in enumerate(value):\n\n if isinstance(item, dict):\n\n # Flatten nested object to work with formsets\n item_prefix = '%(key)s-%(index)d-' % { 'key': key, 'index': i }\n encoded_dict.update(self._flatten_dict(item, prefix=item_prefix))\n\n # ID for use with model multi choice fields\n id_value = item.get('id', None)\n if id_value:\n encoded_dict.update({ key: id_value })\n\n else:\n\n # Value for use with model multi choice fields\n encoded_dict.update({ key: item })\n\n # ID for use with model choice fields\n elif isinstance(value, dict):\n encoded_dict[item_key] = value.get('id', value)\n\n # Keep JavaScript null as Python None\n elif value is None:\n encoded_dict[item_key] = None\n\n # Other values are used directly\n else:\n encoded_dict[item_key] = unicode(value)\n\n return encoded_dict", "def buildDict(self, dict):\n self.all_words = set(dict)\n self.wc_dict = collections.defaultdict(int)\n for w in dict:\n for wc in self.get_wildcards(w):\n self.wc_dict[wc] += 1", "def expand_galaxy_input_dict(val):\n out = {}\n for k, v in val.items():\n out[k] = v\n for k, v in val.items():\n kl = k.split(\"|\")\n if len(kl) > 1:\n o = out\n for kli in kl[:-1]:\n if kli not in o:\n o[kli] = {}\n o = o[kli]\n o[kl[-1]] = v\n return out", "def build_choices(header, dictionary, after):\n out = f\"{header}\\n\"\n for i, (key, item) in enumerate(dictionary.items(), start=1):\n out += f\"{INDENT_STRING}{i}. 
{item}\\n\"\n out += after\n return out", "def compress(self, data_list):\n data = {}\n if data_list:\n data = dict(\n (f.name, data_list[i]) for i, f in enumerate(self.form))\n\n f = self.form.__class__(data)\n f.is_valid()\n return f.cleaned_data\n return data", "def populate_form(self, **kwargs):\n for name, value in kwargs.items():\n self.populate_field(name, value)", "def translate_to(common_form, target):\r\n # retrieve the correct translation dictionary\r\n target_dict = get_dict(target)\r\n # recreate the form with the translated keys\r\n target_form = {target_dict[key]: common_form[key]\r\n for key in target_dict.keys()}\r\n return target_form", "def wizard_process_received_form(form):\n lines = {key.split('_')[1]: value.split('_')[1] for key, value in form.items() if key.startswith(\"line\")}\n # print(lines)\n times = {key.split('_')[1]: value for key, value in form.items() if key.startswith(\"time\")}\n # print(times)\n return {int(value): times[key] for key, value in lines.items()}", "def payload_from_form(form, prefix='', delete=False):\n\n prefix = f'{prefix}-' if prefix else ''\n payload = {f'{prefix}{k}': form[k].value() for k, v in form.fields.items() if form[k].value()}\n if getattr(form.instance, 'id'):\n payload['id'] = form.instance.id\n\n if delete:\n payload['delete'] = True\n return payload", "def process_step(self, form):\n #print(form.data)\n\n #print(form.data)\n #print(self)\n \n institution = {}\n inst_list = []\n if self.steps.current == '1':\n \n institution['institution'] = form.data['1-0-institution']\n institution['date_from'] = form.data['1-0-date_from']\n institution['date_to'] = form.data['1-0-date_to']\n inst_list.append(institution)\n inst_keys = dict(form.data.lists())\n \n #Create dictionary dynamically for the other institutions incase more than two institutions are entered\n if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is list:\n inst_list2 = []\n #Add institutions \n for i,insti in enumerate(inst_keys.get('1-NaN-institution')):\n inst_i = {}\n #print(i)\n date_from = inst_keys['1-NaN-date_from'][i]\n date_to = inst_keys['1-NaN-date_to'][i]\n course_duration = inst_keys['1-NaN-course_duration'][i]\n inst_i['institution'] = insti\n inst_i['date_from'] = date_from\n inst_i['date_to'] = date_to\n \n inst_list2.append(inst_i)\n #print(inst_list2)\n inst_list.extend(inst_list2)\n #Create dictionary dynamically for the other institutions incase more than two institutions are entered\n if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is not list:\n inst_0 = {}\n inst_0['institution'] = form.data['1-NaN-institution']\n inst_0['date_from'] = form.data['1-NaN-date_from']\n inst_0['date_to'] = form.data['1-NaN-date_to']\n inst_0['course_duration'] = form.data['1-NaN-course_duration']\n #inst_0['achievements'] = ''\n inst_list.append(inst_0)\n \n #Add the entered information to a session object\n self.request.session['institution'] = inst_list", "def transform_dict(dc: dict):\n tmp_dict = dict()\n for k, v in dc.items():\n k1, k2 = k.split(\"|\")\n v1 = {'e': v, 'c': k2}\n v2 = {'e': v, 'c': k1}\n insert_to_dict(tmp_dict, k1, v1)\n insert_to_dict(tmp_dict, k2, v2)\n return tmp_dict", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = 
number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n obj.forfield[index].content = norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def _session_data_to_forms(self, user, department, session_data):\n forms = OrderedDict()\n\n if not session_data:\n key, form = self._create_certificate_form(user, department)\n forms.update({key: form})\n else:\n for index, (key, form_data) in enumerate(session_data):\n for data_key in form_data:\n if data_key == 'quantity':\n try:\n form_data[data_key] = decimal.Decimal(form_data[data_key])\n except decimal.InvalidOperation:\n form_data[data_key] = ''\n\n form = CertificateForm(user, department, initial=form_data,\n prefix='gc{}'.format(key), index=index + 1)\n forms.update({key: form})\n\n return forms", "def flatten_dict(base, v, d):\n if base != '':\n base = base + \".\"\n for k in d:\n if type(d[k]) in (type('a'), type(u'a'), type(1), type(1.1), type(False), type(None)):\n v[base + k] = d[k]\n elif type(d[k]) in (type([]), type((1,2))):\n v[base + k] = \", \".join(d[k])\n elif type(d[k]) == type({}):\n flatten_dict(base + k, v, d[k])\n else:\n print \"huh,\", type(d[k])", "def get_cleaned_data(form, keys_to_remove=[], values_to_remove=[]):\n if not values_to_remove:\n values_to_remove = get_ignorable_form_values()\n\n cleaned_data = copy.copy(form.cleaned_data)\n cleaned_data = clean_dict(\n cleaned_data,\n keys=list(set(cleaned_data.keys()) - set(keys_to_remove)),\n values=values_to_remove\n )\n\n ordered_cleaned_data = OrderedDict()\n for key in form.fields.keys():\n if key in cleaned_data:\n ordered_cleaned_data[key] = cleaned_data[key]\n\n return ordered_cleaned_data", "def _render_dict(input_dict: Dict[str, Any]) -> Dict[str, Any]:\n output_dict = {}\n\n for key, value in input_dict.items():\n if isinstance(value, str):\n new_value = string.Template(value).substitute(_mapping)\n output_dict[key] = new_value\n elif isinstance(value, dict):\n output_dict[key] = _render_dict(value) # type: ignore\n else:\n output_dict[key] = value\n\n return output_dict", "def get_processed_form_wizard_data(form_wizard, form_list,\n form_element_entries):\n field_name_to_label_map = {}\n cleaned_data = {}\n for form in form_list:\n _field_name_to_label_map, _cleaned_data = get_processed_form_data(\n form,\n form_element_entries\n )\n field_name_to_label_map.update(_field_name_to_label_map)\n cleaned_data.update(_cleaned_data)\n\n return (\n field_name_to_label_map,\n cleaned_data\n )", "def procesos(self):\n for name, value in self.cleaned_data.items():\n yield (name, value)", "def _generate_form_fields(self):\n params = list(filter(lambda x: (x.precedence is None or x.precedence >= 0) and 
not x.constant,\n self.param.params().values()))\n for p in sorted(params, key=lambda p: p.precedence or 9999):\n # TODO: Pass p.__dict__ as second argument instead of arbitrary\n p_name = p.name\n\n # Preserve param tuple type.\n if self.data:\n if isinstance(getattr(self.param, p.name), tuple):\n p.default = tuple(self.data.getlist(p.name))\n\n # Preserve initial options for Selector\n if isinstance(self.param.params()[p_name], (param.FileSelector, param.MultiFileSelector)):\n p.default = \"\"\n\n self.fields[p_name] = self.widget_map[type(p)](self.param, p, p.name)\n self.fields[p_name].label = p.name.replace(\"_\", \" \").title()\n if self.read_only is None:\n widget_attribute = {'class': 'form-control'}\n else:\n # TODO: Should this be readonly instead of disable?\n widget_attribute = {'class': 'form-control', 'disabled': self.read_only}\n self.fields[p_name].widget.attrs.update(widget_attribute)\n self.fields[p_name].required = not self.param.params()[p_name].allow_None\n self.fields[p_name].disabled = self.param.params()[p_name].constant\n self.fields[p_name].help_text = self.param.params()[p_name].doc\n # self.fields = self.base_fields", "def build(self, data: dict):", "def dictionarytoraw(dict):\n\n data = \"\"\n\n for key, val in dict.items():\n if isinstance(val,dict):\n for kkey, vval in iter(val.items()):\n if kkey is None:\n data += str(key) + NULL + str(vval) + NULL\n else:\n data += ''.join([str(key), '\\x1c', str(kkey), NULL, str(vval), NULL])\n else:\n data += str(key) + NULL + str(val) + NULL\n\n return (len(data) > 0 and data) or NULL", "def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]", "def ask(cwl, out):\n\n # iterate keys to get required fields out of the dict\n for inp in cwl.keys():\n if (isinstance(cwl[inp], dict) and inp not in exclude): # call recursively\n if inp == 'properties':\n ask(cwl[inp], out)\n continue # continue in loop w/o nesting\n out.update({inp: {}}) # use this key as outer key, creater nest\n ask(cwl[inp], out[inp]) # pass the updated, nested dict back in\n if not isinstance(cwl[inp], list): # 'required' is always a list\n continue # in the loop, not 'continue to the next step'\n # ask to provide an input\n for field in cwl[inp]:\n m = 'Please provide an input value for \"{}\": '.format(field)\n out.update({field: input(m)}) \n \n out = stripper(out)\n return out", "def minimal_form_data():\n\n form_data = { \n 'status': '0',\n 'title': 'Recurso de teste',\n 'description': 'Recurso para testes',\n 'abstract': 'Resumo',\n \n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0', \n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0', \n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data", "def populate(self, values):\r\n field_names = [f['name'] for f in self.fields]\r\n for name in [name for name in values.keys() if name in field_names]:\r\n self[name] = values.get(name, '')\r\n\r\n # We have to do an extra trick to catch unchecked checkboxes\r\n for name in [name for name in values.keys() if name[9:] in field_names\r\n and name.startswith('checkbox_')]:\r\n if not values.has_key(name[9:]):\r\n self[name[9:]] = '0'", "def formkeys(entry_temp):\r\n\r\n return 
nformat.format_keys(transpose_keys(entry_temp,\r\n notebook=notebook))", "def visit_dict(self, sydict):\n for key, value in sydict.items():\n child = type(value)(value.container_type)\n self.current[key] = child\n value.visit(SpineCopyVisitor(child))", "def init_dict(self, train_sents, dict_ord=None):\n if dict_ord is None:\n dict_ord = self.MIN_VALID\n\n for sent in train_sents:\n for form, tag in sent:\n # handle plurals\n if tag == 'NNS' and self.split_plurals:\n form = self._plural_to_singular(form)\n # lowercase\n if self.lowercase:\n form = self._lowercase(form)\n # add new normalized forms to dictionary\n if form not in self.dict:\n self.dict[form] = dict_ord\n self.rev_dict[dict_ord] = form\n dict_ord += 1\n\n return dict_ord", "def process_metadata(full_dict):\n reduced_dict = {}\n for key, param_obj in full_dict.items():\n if key[0] == '_':\n continue\n if is_numerical(param_obj):\n reduced_dict[key] = param_obj\n elif key == 'grid':\n grid_dict = param_obj._get_metadata_dict()\n reduced_dict.update(grid_dict)\n else:\n reduced_dict[key] = str(param_obj)\n return reduced_dict", "def dictogram_dictlist(self):\n for key, value in self.word_dict.items():\n self.word_dict[key] = dictogram.Dictogram(value)\n # print(\"self.word_dict\", self.word_dict)", "def format_dict(\n d: typing.Mapping[TTextType, TTextType]\n) -> typing.Iterator[TViewLine]:\n\n return format_pairs(d.items())", "def _prepare_multipart_form_data(data):\n output = dict()\n for key in data:\n output[key] = (None, data[key])\n return output", "def _generate_nml_from_dict(d: dict, name: str = \"input\"):\n\n def logical(b: bool):\n return \".true.\" if b else \".false.\"\n\n with tempfile.NamedTemporaryFile(prefix=\"swami_\", delete=False, suffix=\".inp\", mode=\"r+\") as nml:\n nml.write(f\"&{name}\\n\")\n for k, v in d.items():\n if isinstance(v, bool):\n nml.write(f\"{k} = {logical(v):s}\\n\")\n elif isinstance(v, str):\n nml.write(f\"{k} = '{v:s}'\\n\")\n elif isinstance(v, float):\n nml.write(f\"{k} = {v:23.16e}\\n\")\n elif isinstance(v, int):\n nml.write(f\"{k} = {v:d}\\n\")\n else:\n nml.write(f\"{k} = {v}\\n\")\n # nml.write(\"\\\\\")\n nml.write(\"&end\\n\")\n nml.close()\n\n return nml.name", "def _build(specs_dict: dict, **kwargs: bool):\n return [\n Card(face, suit, value=specs_dict.get(face).get(suit), **kwargs)\n for face in specs_dict.keys()\n for suit in specs_dict.get(face).keys()\n ]", "def form_data(self, clear=[], **kwargs):\n form_data = {\n 'payer_name': 'William Williams',\n 'billing_address': '1 Main Street',\n 'billing_city': 'Anytown',\n 'country': 'USA',\n 'billing_state': 'MD',\n 'billing_zip': '20852',\n 'payment_type': 'CreditCard',\n 'project_code': '15-4FF',\n 'payment_amount': '3000',\n 'information_consent': True,\n }\n for key in clear:\n del form_data[key]\n for key, value in kwargs.items():\n form_data[key] = value\n return form_data", "def build_dct(dic, keys, value):\n key = keys.pop(0)\n if len(keys):\n dic.setdefault(key, {})\n build_dct(dic[key], keys, value)\n else:\n # Transform cookbook default attribute strings into proper booleans\n if value == \"false\":\n value = False\n elif value == \"true\":\n value = True\n # It's a leaf, assign value\n dic[key] = value", "def get_form_kwargs(self, i):\n return dict(request=self.request)", "def render_form():", "def makeSaveStringForPypet(value, savestr):\n for k, v in value.items():\n if isinstance(v, dict):\n _savestr = savestr + k + \".\"\n makeSaveStringForPypet(v, _savestr)\n else:\n _savestr = savestr + k\n 
self.traj.f_add_result(_savestr, v)", "def _from_dict_to_destination(self):\n self._translated_xml_tree = etree.Element(\"root\")\n for key, value in self._translated_dict[\"root\"].items():\n etree.SubElement(self._translated_xml_tree, key).text = value", "def init_from_dict(self, d):\n for k, v in d.items():\n # First, keys must be strings, not ints\n if isinstance(k, int):\n k = str(k)\n # Now, assign to the key, handling nested AttrDicts properly\n if isinstance(v, dict):\n self.set_key(k, AttrDict(v))\n elif isinstance(v, list):\n self.set_key(k, [i if not isinstance(i, dict) else AttrDict(i)\n for i in v])\n else:\n self.set_key(k, v)", "def reprocess_dict (dict1):\n out = {};\n for kk,value in dict1.iteritems():\n # parse keys\n (lo0,dur,decdec,freqmhz,nch),weight = kk[0].split(\"_\"),kk[1]\n if weight != \"natural\":\n weight += \":\" + kk[3];\n dec = -int(decdec.split(\"-\")[1]);\n freq = int(freqmhz[:-3])\n # parse layout\n lo = lo0;\n if lo[-2] in \"abcd\":\n lores = \"0.\"+lo[-1];\n lofreq = dict(a=650,b=800,c=1000,d=1400)[lo[-2]];\n lo = lo[:-2];\n else:\n lores = 0;\n lofreq = 0;\n lo = lo[4:];\n l00 = lo0[4:]\n wbins.add(weight);\n # make new entry\n out[lo0,lores,lofreq,freq,dec,weight] = [value,kk];\n return out;", "def process_dict(self, dictionary):\n return self._flatten(dictionary)", "def _dict_params(self, the_dict: Dict):\n return [p for _, e in the_dict.items() for p in self._params(e)]", "def forms(self):\r\n forms = FormsDict()\r\n for name, item in self.POST.iterallitems():\r\n if not hasattr(item, 'filename'):\r\n forms[name] = item\r\n return forms", "def curseschema(schema, value_dict, unpacked_errors, state=None):\n for key, value in schema.fields.items():\n if isinstance(value, formencode.Schema):\n yield key, dict(curseschema(value, value_dict[key],\n unpacked_errors.get(key, {})))\n elif isinstance(value, formencode.Validator):\n if key not in unpacked_errors and key in value_dict:\n yield key, value.to_python(value_dict[key], state)\n else:\n raise TypeError(\"Expected `formencode.Schema` or \"\n \"`formencode.Validator`, got %s instead.\" % (type(value)))", "def _convert(self, dictlike):\n for incoming_key, valuelist in util.dictlike_iteritems(dictlike):\n for value in valuelist:\n new_key = self.keyfunc(value)\n if incoming_key != new_key:\n raise TypeError(\n \"Found incompatible key %r for value %r; this \"\n \"collection's \"\n \"keying function requires a key of %r for this value.\" % (\n incoming_key, value, new_key))\n yield value", "def get_edited_cart_ings(cls, form):\n\n ings_dict = {}\n\n for r in form:\n try:\n i = int(r[-2:])\n if i in ings_dict:\n ings_dict[i].append([r, form[r]])\n else:\n ings_dict[i] = [[r, form[r]]]\n except Exception:\n i = int(r[-1:])\n if i in ings_dict:\n ings_dict[i].append([r, form[r]])\n else:\n ings_dict[i] = [[r, form[r]]]\n ings_dict[i].sort()\n\n ings_to_add = ings_dict.values()\n\n return ings_to_add", "def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls", "def complete_form_data():\n\n missing_fields = {\n 'link' : 'http://bvsalud.org',\n 'originator' : 'BIREME',\n 'source_type': 1,\n 'source_language': 1,\n 'originator_location' : 1,\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',\n\n 'main-descriptor-content_type-object_id-0-id' : '',\n 'main-descriptor-content_type-object_id-0-text' : 'malaria',\n 'main-descriptor-content_type-object_id-0-code' : 
'^d8462',\n 'main-descriptor-content_type-object_id-0-status' : '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',\n 'main-resourcethematic-content_type-object_id-0-status' : '0',\n }\n\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n\n return complete_form_data", "def gen_output(json_dct, *args):\n keys_to_add = ('job_title', 'location', 'date', 'company', 'num_stars')\n for arg, key in zip(args, keys_to_add): \n if arg: \n json_dct[key] = arg\n\n return json_dct", "def get_formatted_dict(dct):\n\n user_info_list = dct[Constants.VALUE]\n dct.clear()\n\n for user_info in user_info_list:\n uid = user_info.pop(Constants.ID)\n dct[uid] = user_info\n\n return dct", "def toKVForm(self):\n return kvform.dictToKV(self.toArgs())", "def normalise_parameter(dict_of_dicts):\n new_inputs = []\n for key, inner_dict in dict_of_dicts.items():\n if not isinstance(inner_dict, dict):\n inner_dict = { 'type': inner_dict }\n inner_dict['id'] = key\n new_inputs.append(inner_dict)\n return new_inputs", "def make_fields(self):\n for name, prop in self.edit:\n instance_value = self.model.get(name)\n post_value = self.data[name] if (self.data and self.data.has_key(name)) else instance_value\n form_field_class = self.get_field_type(prop)\n form_field = form_field_class(model=self.model, property=prop, name=name, instance_value=instance_value, post_value=post_value)\n self.add(form_field)", "def DictFunction():\r\n print \"{name} is from {city}, and he likes {cake} cake, {fruit} fruit, {salad} salad and {pasta} pasta\".format(**food_prefs)", "def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result", "def categorize_reads(f_dict, titer):\n for template in titer:\n for mate in template:\n mate['cat_list'] = mate.get('cat_list', []) + [k for k, f in f_dict.items() if f(mate)]\n yield template", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def _walk(self, d, depth=0):\n\n output = ''\n indent = 3\n header_width = 35 - depth*indent\n\n for k, v in sorted(d.items(), key=lambda x: x[0]):\n if isinstance(v, dict):\n output += \"\".ljust(depth * indent)+k+'\\n'\n output += self._walk(v, depth + 1)\n else:\n if isinstance(v, np.ndarray):\n # np array or matrix\n shape = v.shape\n if len(shape) == 1:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"array (%d)\" % (v.shape[0]) + '\\n'\n\n elif len(shape) == 2:\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : \" + \"matrix (%d,%d)\" % (v.shape[0], v.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], str):\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : \" + str(item) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], np.ndarray):\n # List of arrays\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n for item_id, item in enumerate(v):\n if len(item.shape) == 1:\n output += \"\".ljust((depth+1) * indent)\n output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : array (%d)\" % (item.shape[0]) + '\\n'\n\n elif len(item.shape) == 2:\n output += \"\".ljust((depth+1) * indent)\n 
output += (\"[\"+str(item_id)+\"]\").ljust(header_width-3) + \" : matrix (%d,%d)\" % (item.shape[0], item.shape[1]) + '\\n'\n\n elif isinstance(v, list) and len(v) and isinstance(v[0], dict):\n output += \"\".ljust(depth * indent)\n output += k.ljust(header_width) + \" : list (%d)\\n\" % len(v)\n\n for item_id, item in enumerate(v):\n output += \"\".ljust((depth + 1) * indent) + \"[\"+str(item_id)+\"]\" + '\\n'\n output += self._walk(item, depth + 2)\n\n else:\n output += \"\".ljust(depth * indent) + k.ljust(header_width) + \" : \" + str(v) + '\\n'\n\n return output", "def update_dict(new,old):", "def process(list_, dict_, keyword):\n if len(list_) == 4:\n name, val, type_, frac_ = list_[0], list_[1], list_[2], list_[3]\n elif list_[0] == 'direc':\n name, val = list_[0], [list_[i] for i in range(len(list_)) if i > 0]\n else:\n name, val = list_[0], list_[1]\n\n if name not in dict_[keyword].keys() and name in ['coeff']:\n dict_[keyword][name] = []\n if keyword in ['TREATED', 'UNTREATED', 'COST'] and 'types' not in dict_[keyword].keys():\n dict_[keyword]['types'] = []\n if keyword in ['TREATED', 'UNTREATED', 'COST']:\n if len(list_) == 4:\n dict_[keyword]['types'] += [[type_, float(frac_)]]\n else:\n dict_[keyword]['types'] += ['nonbinary']\n\n # Type conversion\n if name in ['agents', 'seed', 'maxiter', 'disp']:\n val = int(val)\n elif name in ['source', 'file', 'optimizer', 'start']:\n val = str(val)\n elif name in ['direc']:\n val = list(val)\n else:\n val = float(val)\n if name in ['coeff']:\n dict_[keyword][name] += [val]\n else:\n dict_[keyword][name] = val\n # Finishing.\n return dict_", "def _fillData(self, extras):\n e = {k : (\" \".join(v[\"words\"])\n if type(v) == dict and \"words\" in v else v) for k, v in extras.items()}\n return self.data % e", "def _sanitise_fields(self, record):\n sanitised = {}\n for k, v in record.items():\n new_key = k.replace('(', '_').replace(')', '_')\n sanitised[new_key] = v\n return sanitised", "def dict_to_widgets(self, item=None):\n\n for bcs in self.data:\n bc = self.data[bcs]\n bc = {k.lower(): v for k, v in bc.items()}\n print(\"BCXCCCC\", bc)\n if item:\n # set all keys to lower\n print(\"self.data[bcs]:\", self.data[bcs])\n if bc[\"Name\".lower()] == item.text():\n #bc = self.data[bcs]\n self.lineedit_name_eq.setText(bc[\"Name\".lower()])\n else:\n continue\n else:\n #bc = self.data[bcs]\n ## set all keys to lower\n #bc = {k.lower(): v for k, v in bc.items()}\n pass\n for label in self.dynamic_widgets:\n if label.text() == \"Create new solver\":\n print(\"CREATE NRE SOLCER\")\n setting = bc.get(label.text().lower())\n widget = self.dynamic_widgets[label]\n widget_type = widget.metaObject().className()\n if setting is None: # parameter not stored in bc block\n if widget_type == \"QLineEdit\":\n widget.setText(\"\")\n elif widget_type == \"QCheckBox\":\n widget.setChecked(0)\n elif widget_type == \"QComboBox\":\n idx = widget.findText(\"None\")\n widget.setCurrentIndex(idx)\n elif widget_type == \"RadioComboGroup\":\n if widget.text().lower() == \"Linear System Solver\".lower():\n combo_text = bc.get(str(\"Linear System \"+str(setting)+\" Method\").lower())\n elif widget.text().lower() == \"Exec Solver\".lower():\n combo_text = None\n widget.setSelected(setting, combo_text)\n else:\n if widget_type == \"QLineEdit\":\n widget.setText(bc[label.text().lower()])\n #if label.text() == 'Active solvers':\n # _solvers = bc['active solvers'].split()\n # print('_SOLVERS', _solvers, bc[\"name\"].replace(' ','_') + '_solver')\n # if bc[\"name\"].replace(' 
','_') + '_solver' in _solvers:\n # print(\"IS INSIDE\")\n # self.check_new_solver.setChecked(1)\n # if self.check_new_solver.isChecked():\n # print(\"IS CHECKED CHECKED\")\n # # widget.setText(widget.text()+' '+bc['solvers of eq']['name'].replace(' ', '_'))\n elif widget_type == \"QCheckBox\":\n true = ['Logical True'.lower(), 'True'.lower()]\n print(\"widget.TEXT()\", label.text())\n if bc[label.text().lower()].lower() in true:\n widget.setChecked(1)\n #if widget.text() == \"Create new solver\":\n # print(\"CREATE NEW SOLVER INSIDE\")\n # if bc[\"solvers of eq\"].get(\"name\"):\n # #and widget.isChecked():\n # print(\"CREATE NEW SOLVER CHECKED\")\n # # pass new solver to active solvers\n # for active_solvers_label in self.dynamic_widgets:\n # if active_solvers_label.text() == \"Active solvers\":\n # active_solvers = self.dynamic_widgets[active_solvers_label]\n # current_solvers = active_solvers.text()\n # updated_solvers = current_solvers +\" \"+bc['solvers of eq']['name']\n # active_solvers.setText(updated_solvers)\n else:\n print(\"LABEL TEXT SET()\", label.text())\n if label.text() != 'Create new solver':\n widget.setChecked(0)\n elif widget_type == \"QComboBox\":\n try:\n idx = widget.findText(bc[label.text().lower()])\n widget.setCurrentIndex(idx)\n except:\n print(\"QCombobox does not contain this parameter!\")\n elif widget_type == \"RadioComboGroup\":\n if widget.text().lower() == \"Linear System Solver\".lower():\n combo_text = bc.get(str(\"Linear System \"+str(setting)+\" Method\").lower())\n elif widget.text().lower() == \"Exec Solver\".lower():\n combo_text = None\n widget.setSelected(setting, combo_text)\n break\n\n # check QLineEdit for Active solvers and update CheckBox for new solver\n for active_solvers_label in self.dynamic_widgets:\n if active_solvers_label.text() == \"Active solvers\":\n print(\"ACTIVE SOLVERS\")\n active_solvers = self.dynamic_widgets[active_solvers_label]\n current_solvers = active_solvers.text().split()\n if bc[\"name\"].replace(' ','_') + '_solver' in current_solvers:\n self.check_new_solver.setChecked(1)", "def construct_request_obj(cls, dict_):\n\n entry_object = {\"entry\": dict_}\n\n return entry_object", "def _template_data(self):\n return {\"form\": self.form.render()}", "def flatten_json_struct(data, count_fields=[], datetime_fields=[]):\n for k,v in data.items():\n if v and type(v) != dict and type(v) != list:\n if k in datetime_fields and re_prog.match(v):\n #print('> yielding date {0}'.format(k))\n yield k, date_parser.parse(v).date()\n else:\n #print('> yielding value {0}: {1}'.format(k, v))\n yield k, v\n elif type(v) == list:\n if k in count_fields:\n #print('> yielding count of {0}'.format(k))\n yield k, len(v)\n else:\n new_data = { _generate_name(k,idx):val for idx,val in enumerate(v) }\n #print ('recursing %s' % new_data)\n for item in flatten_json_struct(new_data,\n count_fields=count_fields,\n datetime_fields=datetime_fields):\n #print('> yielding {0}: {1}'.format(item, type(item)))\n yield item[0], item[1] \n elif type(v) == dict:\n new_data = { _generate_name(k, k1): v1 for k1, v1 in v.items()}\n #print ('recursing %s' % new_data)\n for item in flatten_json_struct(new_data,\n count_fields=count_fields,\n datetime_fields=datetime_fields):\n #print('> yielding {0}: {1}'.format(item, type(item)))\n yield item[0], item[1]", "def encode_dict(d):\n return \"&\".join([\"%s=%s\" % (align, score) for align, score in d.items() if '_' not in align])", "def _raw(self,key):\n for d in self.__dicts:\n if key in d:\n return d._raw(key)\n 
raise KeyError(key)", "def generateSuccessor(self, dictio):\n\t\treturn dictio", "def getForms(self, page):\n\n forms = HTTP.FORM_REGEX.findall(self.get[page]['data'])\n self.get[page]['forms'] = []\n for params, content in forms:\n form = {}\n\n form = dict(HTTP.PARAM_REGEX.findall(params))\n\n # Parsing regular inputs\n inputs = HTTP.INPUT_REGEX.findall(content)\n form['inputs'] = {}\n for e in inputs:\n params = dict(HTTP.PARAM_REGEX.findall(e))\n\n try: name = params['name']\n except: name = params['type']\n\n form['inputs'][name] = params\n if 'required' in params: form['inputs'][name]['required'] = True\n\n # Parsing text areas\n txtareas = HTTP.TXTAREA_REGEX.findall(content)\n for txtarea in txtareas:\n params = dict(HTTP.PARAM_REGEX.findall(txtarea))\n\n form['inputs'][params['name']] = params\n form['inputs'][params['name']]['type'] = 'textarea'\n if 'required' in params: form['inputs'][params['name']]['required'] = True\n\n # Parsing select inputs\n selects = HTTP.SELECT_REGEX.findall(content)\n for params, value in selects:\n params = dict(HTTP.PARAM_REGEX.findall(params))\n\n form['inputs'][params['name']] = params\n form['inputs'][params['name']]['type'] = 'select'\n form['inputs'][params['name']]['value'] = value\n if 'required' in params: form['inputs'][params['name']]['required'] = True\n\n\n self.get[page]['forms'].append(form)\n return self.get[page]['forms']", "def fillDict(valDict, nowDate=datetime.now()):\n copyDict = copy.deepcopy(valDict)\n copyDict[names.year] = nowDate.year\n copyDict[names.month] = nowDate.month\n copyDict[names.day] = nowDate.day\n return copyDict", "def concatenate_record(record):\n new_record = {}\n for k,v in record.items():\n if k in ['AB','FX','PA','TI','RP','ID']:\n new_v = ' '.join(v)\n \n if k == 'ID':\n new_v = new_v.split('; ')\n \n new_record[k] = new_v\n elif k == 'CR':\n previous_citation = ''\n new_citations = []\n for citation in v:\n if previous_citation.endswith('DOI'):\n new_citations[-1] += ' ' + citation\n previous_citation = new_citations[-1]\n else :\n new_citations.append(citation)\n previous_citation = citation\n \n new_record[k] = new_citations\n else :\n new_record[k] = v\n \n return new_record" ]
[ "0.6394135", "0.6110445", "0.600457", "0.59126186", "0.586534", "0.5860337", "0.5741977", "0.5653947", "0.56515414", "0.5616137", "0.5573657", "0.55662775", "0.53771883", "0.5328441", "0.5310097", "0.52721184", "0.52581376", "0.52523905", "0.52309453", "0.52180254", "0.52097875", "0.5200536", "0.51983", "0.5185951", "0.5177609", "0.51679426", "0.51631033", "0.5160443", "0.5158311", "0.5134957", "0.5129694", "0.51260036", "0.512073", "0.5105719", "0.50992936", "0.5092117", "0.5086856", "0.50832975", "0.50717175", "0.50641", "0.5062254", "0.5057962", "0.5056234", "0.5031616", "0.50268835", "0.50178367", "0.5007826", "0.4988451", "0.49857756", "0.49840075", "0.49418458", "0.49334756", "0.49306613", "0.49282742", "0.49168378", "0.4915209", "0.49019647", "0.48976794", "0.48899823", "0.48864317", "0.48858547", "0.48857707", "0.48826253", "0.48621476", "0.48604876", "0.48602092", "0.48557645", "0.48468733", "0.48404932", "0.48397437", "0.482809", "0.48270628", "0.48209047", "0.47953945", "0.4789883", "0.4772259", "0.4770218", "0.47676742", "0.4756896", "0.4756725", "0.47503906", "0.47470325", "0.4741088", "0.4737602", "0.47374156", "0.47373727", "0.47315466", "0.4730869", "0.47279793", "0.47268182", "0.4724991", "0.47216496", "0.4721598", "0.47194028", "0.471728", "0.47158104", "0.47150922", "0.47133934", "0.47114682", "0.47112235" ]
0.5852613
6
check if str startswith
def val_starts_with(base_string, strings):
    for the_string in strings:
        if base_string.startswith(the_string):
            return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startswith(value, s):\n\n if not value: return False\n return value.find(s) == 0", "def starts_with(s, prefix):\n if prefix == '':\n return True\n elif s[0] != prefix[0]:\n return False\n else: # s[0] == prefix[0]\n return starts_with(s[1:], prefix[1:])", "def starts_with(strn, prefix):\n return strn.startswith(prefix)", "def starts(str_, val_to_check):\n \n return (str_.startswith(val_to_check))", "def startswith(self, s):\n return self.peek((0, len(s))).startswith(s)", "def startswith(self, s):\n return self.peek((0, len(s))).startswith(s)", "def is_prefix(prefix: str, word: str):\n return word.startswith(prefix)", "def check_prefix(custom_str: str) -> bool:\r\n\r\n return len(custom_str) == 0", "def starts_with(self, prefix: str) -> bool:\n return self.search(prefix, True)", "def startsWith(self, prefix: str) -> bool:\n curr_chars = self.chars\n for c in list(prefix):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return True", "def startswith(self, other):", "def startsWith(self, prefix: str) -> bool:\n return bool(self.find(prefix))", "def beginswith(self, val):\n\t\treturn BeginsWith(self, val)", "def startsWith(self, p: str) -> bool:\n return not p or p[0] in self.d and self.d[p[0]].startsWith((len(p) > 1 and p[1:]) or '')", "def startswith(self, prefix, start=0, end=None):\n return startswith(self, prefix, start, end)", "def startsWith(self, prefix: str) -> bool:\n word = prefix\n if len(word) == 0:\n return True\n idx = ord(word[0]) - ord('a')\n if self.children[idx] is None:\n return False\n return self.children[idx].startsWith(word[1:])", "def startswith(list, prefix):\n\n return list[:len(prefix)] == prefix", "def startsWith(self, prefix):\n pointer = self.tries\n for i in range(len(prefix)):\n ascii = ord(prefix[i]) - ord('a')\n if pointer[ascii] == None:\n return False\n pointer = pointer[ascii]\n return True", "def startsWith(self, prefix: 'str') -> 'bool':\n p = self.root\n for ch in prefix:\n if ch in p:\n p = p[ch]\n else:\n return False\n return True", "def startsWith(self, prefix):\n if prefix[0] not in self.trie:\n return False\n cur = self.trie[prefix[0]]\n for char in prefix[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return True", "def startswith( self, prefix ):\n return len(self.commands) >= 1 and self.commands[0].startswith( prefix )", "def startsWith(self, prefix):\r\n t = self.trie\r\n for w in prefix: \r\n if w not in t: \r\n return False\r\n t = t[w]\r\n return True", "def startsWith(self, prefix):\n now = self.tree\n for i in prefix:\n if i in now:\n now = now[i]\n else:\n return False\n return True", "def startswith(a, prefix, start=0, end=None):\n return _vec_string(\n a, bool_, 'startswith', [prefix, start] + _clean_args(end))", "def startsWith(self, prefix: str) -> bool:\n return self._traverse(prefix)", "def starts_with(text, substring):\n assert text.startswith(substring), \"%r doesn't start with %r\" % (text,\n substring)", "def test_starts_letter(x):\n return x[0].isalpha()", "def istartswith(self, other):", "def is_prefix(trie, string: str) -> bool:\n return any(w.startswith(string) for w in trie)", "def startsWith(self, prefix):\n level = self.trie\n for c in prefix:\n if c in level:\n level = level[c]\n else:\n return False\n return True", "def startsWith(self, prefix: str) -> bool:\n cur = self.root\n for letter in prefix:\n if letter not in cur:\n return False\n cur = cur[letter]\n return True", "def has_prefix(cls, string1, string2):\n return len(cls.get_prefix(string1, string2)) > 
0", "def startsWith(self, prefix):\n cur = self._dict\n for c in prefix:\n ind = ord(c) - 97\n if cur.children[ind] == None:\n return False\n cur = cur.children[ind]\n\n return True", "def startswith(self, base):\n if self.path_is_string:\n return self.path.startswith(base)\n if not self.path:\n return not bool(base)\n if self.path_type is list and len(self.path) is 1:\n return self.path[0].startswith(base)\n return self.joined().startswith(base)", "def special_prefix(key):\n for x in (\"STARFISH_\", \"SLICEDIMAGE_\"):\n if key.startswith(x):\n return True\n return False", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char not in node:\n return False\n node = node[char]\n return True", "def match(self, encoded):\n encoded = check_unicode(encoded)\n return encoded.startswith(self.PREFIX)", "def startsWith(self, prefix: str) -> bool:\n node = self\n for c in prefix:\n node = node.d.get(c)\n if not node:\n return False\n return True", "def startsWith(self, prefix: str) -> bool:\n temp=self.root\n for char in prefix:\n index=ord(char)-ord('a')\n \n if(not temp.children[index]):\n return False\n temp=temp.children[index]\n \n return True", "def startsWith(self, prefix):\n tri = self.root.d\n \n \n if len(prefix) == 0: \n return True\n \n if len(tri) == 0:\n return False\n \n p = 0\n \n for i in xrange(len(prefix)):\n if tri != 0 and prefix[i] in tri:\n tri = tri[prefix[i]]\n else:\n return False\n \n return True", "def startsWith(self, prefix):\n ret = True\n curr = self.trie\n for i, ch in enumerate(prefix):\n curr = curr.get(ch, {})\n if curr:\n continue\n else:\n break\n \n if i==len(prefix)-1:\n ret = True\n else:\n ret = False\n return ret", "def start_with(self, prefix):\n node = self.search_prefix(prefix)\n return node is not None", "def test_match_start_check_at_beginning_of_string(self):\n first_letter = \"a\"\n s = \"abcdef\"\n self.assertEqual(__, re.search(first_letter, s).group())", "def startsWith(self, prefix):\n node = self.root\n for char in prefix:\n if char in node.dict:\n node = node.dict[char]\n else:\n return False\n return True", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for c in prefix:\n if c not in node:\n return False\n node = node[c]\n return True", "def start_with(self, prefix):\n return self.__find_node(prefix) != None", "def startsWith(self, prefix: str) -> bool:\r\n node=self.root\r\n for c in prefix:\r\n if c not in node:\r\n return False\r\n else:\r\n node = node[c]\r\n return True", "def startsWith(self, prefix: str) -> bool:\r\n nroot=self.root\r\n for i in prefix:\r\n # index=ord(i)-ord('a')\r\n if not nroot.children:\r\n return False\r\n nroot=nroot.children[i]\r\n return True", "def startsWith(self, prefix: str) -> bool:\n node = self.head\n for c in prefix:\n if c not in node.next:\n return False\n node = node.next[c]\n return True", "def isPrefix(string, dictionary):\n strLen = len(string)\n for word in dictionary:\n if word[:strLen] == string:\n return True\n return False", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for char in prefix:\n if char in node.child:\n node = node.child.get(char)\n else:\n return False\n return True", "def islower(self) -> bool:\n pass", "def startsWith(self, prefix: str) -> bool:\n current = self.root\n for letter in prefix: \n current = current.children.get(letter)\n if not 
current:\n return False\n return True", "def startsWith(self, prefix):\n curNode = self.root\n for c in prefix:\n if not c in curNode:\n return False\n curNode = curNode[c]\n \n return True", "def startsWith(self, prefix: str):\n node = self.root\n for letter in prefix:\n if letter not in node.child:\n return False\n else:\n node = node.child[letter]\n return True", "def has_prefix_some(s, prefix_set):\n\tfor prefix in prefix_set:\n\t\tif s.find(prefix, 0) != -1:\n\t\t\treturn True\n\treturn False", "def any_lowercase1(s):\n for c in s:\n if c.islower():\n return True\n else:\n return False", "def any_lowercase1(s):\n for c in s:\n if c.islower():\n return True\n else:\n return False", "def starts_with(self, prefix):\n node = self.root\n node = node.get(prefix)\n if not node:\n return False\n return True", "def startsWith(self, prefix: str) -> bool:\n curr = self.root\n for c in prefix:\n if not c in curr.adj:\n return False\n curr = curr.adj[c]\n return True", "def test_evaluate_starts_with_expression(self):\n value = self.evaluate_common(\"startswith('startswith','start')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True)\n value = self.evaluate_common(\"startswith('startswith','end')\")\n self.assertTrue(value.value is False)\n value = self.evaluate_common(\"startswith('startswith','Start')\")\n # not case insensitive\n self.assertTrue(value.value is False)\n try:\n value = self.evaluate_common(\"startswith('3.14',3)\")\n self.fail(\"integer as prefix\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"startswith('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def startsWith(self, prefix):\n current = self.root\n for letter in prefix:\n current = current.children.get(letter)\n if current is None:\n return False\n return True", "def test_lowerFirst(string, expected):\n assert lowerFirst(string) == expected", "def startsWith(self, prefix: str) -> bool:\n\t\tcurrent_node = self.root\n\t\tfor ch in prefix:\n\t\t\tfound_in_child = False\n\t\t\tfor node in current_node.children:\n\t\t\t\tif node.char == ch:\n\t\t\t\t\tfound_in_child = True\n\t\t\t\t\tcurrent_node = node\n\t\t\tif found_in_child == False: # some char not found anywhere\n\t\t\t\treturn False\n\t\treturn True", "def _starts_with_op(spec):", "def startsWith(self, prefix: str) -> bool:\n n = self.root\n for l in prefix:\n cn = n.get_child_with_val(l)\n if cn == None:\n return False\n n = cn\n return True", "def startsWith(self, prefix: str) -> bool:\n parent = self.root\n for char in prefix:\n if char not in parent.children:\n return False\n parent = parent.children[char]\n return True", "def starts_with(self, prefix: str) -> bool:\n curr = self.root\n for ch in prefix:\n curr = curr.children.get(ch)\n if curr is None:\n return False\n return True", "def is_simple_name(s):\n\n assert utils.is_string_type(s)\n assert len(s) > 0\n\n def valid_first(c):\n return c.isalpha() or c == \"_\"\n def valid_later(c):\n return c.isalnum() or c == \"_\"\n return valid_first(s[0]) and all(valid_later(c) for c in s)", "def startsWith(self, prefix: str) -> bool:\n # Looping through the list.\n for x in self.mylist:\n \n # Checking if the current element starts with the prefix.\n if x.startswith(prefix):\n return True\n # If no element start with prefix then return False.\n return False", "def starts_with_vowel(word):\n return True if word[0] in 'aeiou' else False", "def startsWith(self, prefix):\n node = 
self.root\n for letter in prefix:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return True", "def startsWith(self, prefix):\n node = self.root\n for letter in prefix:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return True", "def startsWith(self, prefix):\n current = self.root\n for i in prefix:\n if current.hash_map.get(i) is None:\n return False\n current = current.hash_map.get(i)\n return True\n\n\n # Your Trie object will be instantiated and called as such:\n # obj = Trie()\n # obj.insert(word)\n # param_2 = obj.search(word)\n # param_3 = obj.startsWith(prefix)", "def startsWith(self, prefix: str) -> bool:\n currnode=self.root\n\n for ch in prefix:\n node=currnode.children.get(ch)\n\n if not node:\n return False\n currnode=node\n \n return True", "def any_lowercase1(s):\n\tfor c in s:\n\t\tif c.islower():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for w in prefix:\n node = node.children.get(w)\n if not node:\n return False\n return True", "def command_basename_startswith(self, op):\n return self.__command_basename.startswith(op)", "def hasIdentifierBeginningWith(self, *args):\n return _libsbml.SBasePlugin_hasIdentifierBeginningWith(self, *args)", "def startsWith(self, prefix):\n node = self.root\n for c in prefix:\n if c in node.children:\n node = node.children[c]\n else:\n return False\n return True", "def startswith(self, prefix, node=None):\n if not prefix:\n return True\n\n if node is None:\n node = self.root\n\n for child in node.children:\n if prefix[0] == child.value:\n next_node = child\n break\n else:\n return False\n\n return self.startswith(prefix[1:], next_node) and True", "def startsWith(self, prefix: str) -> bool:\n current = self.root\n for letter in prefix:\n if letter not in current.children:\n return False\n current = current.children[letter]\n return True", "def any_lowercase4(s):\n flag = False\n for c in s:\n flag = flag or c.islower()\n return flag", "def any_lowercase4(s):\n flag = False\n for c in s:\n flag = flag or c.islower()\n return flag", "def stringcheck(self, rule, string):\n if not \"*\" in rule:\n return rule in string\n elif rule[0] == \"*\":\n return string.endswith(rule[1:])\n elif rule[-1] == \"*\":\n return string.startswith(rule[:-1])\n else:\n start, end = rule.split(\"*\")\n return string.startswith(start) and string.endswith(end)", "def isSingleLetter(self, word):\n return (re.match('^\\w$', word)) != None", "def check_string(str_one, str_two):\n str_one = str_one.lower()\n str_two = str_two.lower()\n # print(str_one,str_two)\n if len(str_two) < len(str_one):\n return bool(re.search(str_two+'$',str_one))\n else:\n return bool(re.search(str_one+'$',str_two))", "def is_lower_than(string):\n if '<' in string:\n return True\n return False", "def check_suffix(custom_str: str) -> bool:\r\n\r\n if custom_str.startswith(\"-\"):\r\n return True\r\n if len(custom_str) < 4:\r\n custom_str = custom_str.lower()\r\n for c in ASCII_LOWER:\r\n if c in custom_str:\r\n return True\r\n return False", "def startsWith(self, prefix):\n return self.dfsSearch(self.root, prefix, 0, True)\n\n\n # Your Trie object will be instantiated and called as such:\n # obj = Trie()\n # obj.insert(word)\n # param_2 = obj.search(word)\n # param_3 = obj.startsWith(prefix)", "def startsWith(self, prefix):\n currNode = self.root\n\n for c in prefix:\n if c not in currNode.children:\n return False\n currNode = currNode.children[c]\n return 
True", "def _is_substring(s1, s2):\n\treturn s1.find(s2) != -1", "def substring_match(recipe, word):\n if names_only:\n line = recipe.name\n else:\n line = str(recipe)\n\n if not case:\n word = word.lower()\n line = line.lower()\n\n return line.find(word) != -1", "def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None", "def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False", "def stillLookingForPrefix(self, prefix):\n return prefix in self._prefixToIdentifiers", "def startswith(self, value):\n newq = self.copy()\n newq.setOp(Query.Op.Startswith)\n newq.setValue(value)\n return newq", "def startsWith(self, prefix: str) -> bool:\n node = self.root\n for c in prefix:\n if c not in node.children:\n return False\n return True", "def has_prefix(x: list):\n for val in x:\n if len(list(filter(val.startswith, x))) > 1:\n return True\n\n return False" ]
[ "0.83619857", "0.80160236", "0.7839395", "0.782759", "0.77381086", "0.77381086", "0.7594189", "0.7528208", "0.74058926", "0.7379807", "0.7277057", "0.727268", "0.7236201", "0.7222695", "0.72225994", "0.71874356", "0.7143091", "0.71142554", "0.7009802", "0.6997398", "0.6966478", "0.6931369", "0.6899895", "0.68741506", "0.6868457", "0.68552136", "0.6854271", "0.68372005", "0.6821264", "0.67909366", "0.67781955", "0.67767245", "0.67465734", "0.67156386", "0.6689372", "0.66860527", "0.66860527", "0.6620619", "0.661042", "0.6581603", "0.6574471", "0.6563537", "0.6543715", "0.6538193", "0.65349704", "0.6518228", "0.64818037", "0.64747715", "0.64650744", "0.6452662", "0.6440625", "0.63879716", "0.6384204", "0.6375581", "0.6365438", "0.6345944", "0.6341117", "0.6335088", "0.6335088", "0.6331743", "0.6311071", "0.6305904", "0.6298935", "0.62867975", "0.62828803", "0.6271636", "0.62674856", "0.6266694", "0.6265685", "0.62651277", "0.6259191", "0.6255538", "0.6249362", "0.6249362", "0.6237187", "0.6210596", "0.62038225", "0.6189826", "0.61734724", "0.6167298", "0.6132087", "0.6118336", "0.611829", "0.61138415", "0.61138415", "0.6094134", "0.6089677", "0.60785353", "0.6078192", "0.6075384", "0.60684246", "0.6024663", "0.60230905", "0.6019462", "0.59976786", "0.59711754", "0.5964713", "0.59646094", "0.59581196", "0.5952231" ]
0.7260305
12
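A minimal usage sketch for the val_starts_with document above, assuming that function is in scope; the prefix strings are hypothetical examples. Note that when no prefix matches, the function falls through and returns None (falsy) rather than an explicit False.

# Hypothetical prefixes; any iterable of strings works.
prefixes = ["http://", "https://"]

print(val_starts_with("https://example.com", prefixes))  # True
print(val_starts_with("ftp://example.com", prefixes))    # None (falsy): no prefix matched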
Generates a dict of dicts from dot separated keys. Yet without associated values.
def make_tree(dot_separated_keys):
    tree = {}
    for item in dot_separated_keys:
        inside_tree = tree
        for part in item.split('.'):
            inside_tree = inside_tree.setdefault(part, {})
    return tree
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undotted_keys(dict):\n return {k.lstrip(\".\"): v for k, v in dict.items()}", "def hdict(keys, value, sep=\".\"):\n return reduce(lambda v, k: {k: v}, reversed(keys.split(sep)), value)", "def create_recursive_dot_dict(data: Dict[str, Any], cls=DotDict) -> Union[DotDict, DotDefaultDict]:\n res = cls()\n for k, v in data.items():\n k = k.split(\".\")\n target = res\n for i in range(0, len(k)-1):\n t2 = target.get(k[i])\n if t2 is None:\n t2 = cls()\n target[k[i]] = t2\n\n assert isinstance(t2, cls), f\"Trying to overwrite key {'.'.join(k[:i+1])}\"\n target = t2\n\n assert isinstance(target, cls), f\"Trying to overwrite key {'.'.join(k)}\"\n target[k[-1]] = v\n return res", "def create_namespace_tree(dotted_names):\r\n ret = {}\r\n for dn in dotted_names:\r\n path = dn.split('.')\r\n for i in xrange(len(path)):\r\n ns = '.'.join(path[:i])\r\n itempath = '.'.join(path[:i + 1])\r\n if ns not in ret:\r\n ret[ns] = []\r\n if itempath not in ret[ns]:\r\n ret[ns].append(itempath)\r\n return ret", "def convert_dotKeyToNestedDict(self, tree, key, value):\n\n t = tree\n if \".\" in key:\n key, rest = key.split(\".\", 1)\n if key not in tree:\n t[key] = {}\n self.convert_dotKeyToNestedDict(t[key], rest, value)\n else:\n t[key] = value\n\n return t", "def dict_by_key(d, k):\n k = k.split('.')\n while len(k) != 0:\n if isinstance(d, dict):\n d = d[k[0]]\n else:\n d = d[int(k[0])]\n k = k[1:]\n return d", "def flatten_dict_string_keys(x):\n return {'/'.join(k): v for k, v in flatten_dict(unfreeze(x)).items()}", "def dottify(self, base_name):\n obj_dict = vars(self)\n dotted_dict = {}\n for k, v in obj_dict.items():\n if v is not None:\n dotted_dict[base_name + '.' + k] = v\n return dotted_dict", "def parse_dot_key(data, key):\n for key_part in key.split('.'):\n data = data.get(key_part)\n if data is None:\n break\n return data", "def set_dict_attrs(d, values):\n key = values.keys()[0]\n key_parts = key.split('.')\n if len(key_parts) > 1:\n if key_parts[:1][0] in d.keys():\n d[key_parts[:1][0]] = set_dict_attrs(d[key_parts[:1][0]],\n {'.'.join(key_parts[1:]): values.values()[0]})\n else:\n d[key_parts[:1][0]] = set_dict_attrs({}, {'.'.join(key_parts[1:]): values.values()[0]})\n else:\n d[key_parts[:1][0]] = values.values()[0]\n return d", "def replace_dots(son):\n for key, value in son.items():\n if '.' 
in key:\n new_key = key.replace('.', '_')\n if isinstance(value, dict):\n son[new_key] = replace_dots(\n son.pop(key)\n )\n else:\n son[new_key] = son.pop(key)\n elif isinstance(value, dict): # recurse into sub-docs\n son[key] = replace_dots(value)\n return son", "def _build_config(key, value, d):\n pieces = key.split('.', 1)\n if len(pieces) == 1:\n d[pieces[0]] = value.strip()\n else:\n d[pieces[0]] = _build_config(pieces[1], value, {})\n\n return d", "def test_dotwiz_plus_to_dict():\n dw = DotWizPlus(hello=[{\"Key\": \"value\", \"Another-KEY\": {\"a\": \"b\"}}],\n camelCased={r\"th@#$%is.is.!@#$%^&*()a{}\\:<?>/~`.T'e'\\\"st\": True})\n\n assert dw.to_dict() == {\n 'hello': [\n {\n 'Another-KEY': {'a': 'b'},\n 'Key': 'value',\n }\n ],\n 'camelCased': {\n 'th@#$%is.is.!@#$%^&*()a{}\\\\:<?>/~`.T\\'e\\'\\\\\"st': True\n },\n }", "def test_dotwiz_plus_to_attr_dict():\n dw = DotWizPlus(hello=[{\"Key\": \"value\", \"Another-KEY\": {\"a\": \"b\"}}],\n camelCased={r\"th@#$%is.is.!@#$%^&*()a{}\\:<?>/~`.T'e'\\\"st\": True})\n\n assert dw.to_attr_dict() == {\n 'hello': [\n {\n 'another_key': {'a': 'b'},\n 'key': 'value',\n }\n ],\n 'camel_cased': {'th_is_is_a_t_e_st': True},\n }", "def convert_dot_notation(key, val):\n split_list = key.split('.')\n if len(split_list) == 1: # no dot notation found\n return key, val\n split_list.reverse()\n newval = val\n item = None\n for item in split_list:\n if item == split_list[-1]:\n return item, newval\n newval = {item:newval}\n return item, newval", "def path_to_dict(path: str, val: Any) -> Dict:\n d = val\n for k in reversed(path.split('.')):\n d = {k: d}\n return d", "def expand_objects(record):\n new_record = copy.deepcopy(record)\n for key, value in record.items():\n parts = key.split(\".\")\n if len(parts) > 1:\n parts.reverse()\n current = {parts[0]: value}\n for part in parts[1:]:\n current = {part: current}\n del new_record[key]\n new_record = merge_dicts(new_record, current)\n\n return new_record", "def del_dict_attrs(d, key):\n key_parts = key.split('.')\n if len(key_parts) > 1:\n d[key_parts[:1][0]] = del_dict_attrs(d[key_parts[:1][0]], '.'.join(key_parts[1:]))\n else:\n del d[key_parts[:1][0]]\n return d", "def FlattenDictionary(value, keys=[]):\n result = {}\n if type(value) is dict:\n for key in value:\n result.update(FlattenDictionary(value[key], keys + [key]))\n return result\n else:\n key = '.'.join(keys)\n return {key: value}", "def keys_breadth_first(self, include_dicts=False):\n namespaces = []\n for key in self._key_order:\n if isinstance(getattr(self, key), DotDict):\n namespaces.append(key)\n if include_dicts:\n yield key\n else:\n yield key\n for a_namespace in namespaces:\n for key in self[a_namespace].keys_breadth_first(include_dicts):\n yield '%s.%s' % (a_namespace, key)", "def flatten(d: MutableMapping, sep: str = \".\", parent_key: str = \"\") -> dict:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def serialize_dict_keys(d, prefix=\"\"):\n keys = []\n for k, v in d.items():\n fqk = \"{}{}\".format(prefix, k)\n keys.append(fqk)\n if isinstance(v, dict):\n keys.extend(serialize_dict_keys(v, prefix=\"{}.\".format(fqk)))\n\n return keys", "def unflatten(dictionary, sep=\".\"):\n unflattened_dictionary = {}\n for key, value in dictionary.items():\n parts = key.split(sep)\n sub_dictionary = unflattened_dictionary\n for part in 
parts[:-1]:\n if part not in sub_dictionary:\n sub_dictionary[part] = {}\n sub_dictionary = sub_dictionary[part]\n sub_dictionary[parts[-1]] = value\n return unflattened_dictionary", "def add_by_dot_path(dictionary: Dict, key_path: str, value: Any) -> Dict:\n return add_by_list_of_keys(dictionary, key_path.split(\".\"), value)", "def _dotted_dict_to_object(cls, dict_: dict, key: Key = None):\n\n dotted_pairs = {}\n for k, val in dict_.copy().items():\n if '.' in k:\n dotted_pairs[k] = val\n del dict_[k]\n\n class_dict = {}\n for k, val in dotted_pairs.items():\n class_, prop_key = k.split('.', 1)\n if isinstance(val, list):\n class_dict[class_] = class_dict.get(class_) or list()\n for i, each_val in enumerate(val):\n if len(class_dict[class_]) < i + 1:\n class_dict[class_].append(dict())\n class_dict[class_][i][prop_key] = each_val\n else:\n class_dict[class_] = class_dict.get(class_) or dict()\n class_dict[class_][prop_key] = val\n\n type_hints = get_type_hints(cls)\n for class_, nested_prop in class_dict.items():\n if isinstance(nested_prop, list):\n nested_prop_list = []\n for each_nested_prop in nested_prop:\n nested_prop_list.append(type_hints[class_].__args__[0](**each_nested_prop))\n dict_[class_] = nested_prop_list\n else:\n dict_[class_] = type_hints[class_](**nested_prop)\n\n filtered_dict = {k: v for k, v in dict_.items() if k in type_hints}\n obj = cls(**filtered_dict)\n if key:\n obj.key = key\n return obj", "def get_by_dot_path(dictionary: Dict, key_path: str) -> Any:\n return get_by_list_of_keys(dictionary, key_path.split(\".\"))", "def _flatten_metadata(metadata):\r\n if metadata:\r\n # After changing recursive_keypairs` output we need to keep\r\n # flattening output unchanged.\r\n # Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.')\r\n # output before: a.b:c=d\r\n # output now: a.b.c=d\r\n # So to keep the first variant just replace all dots except the first\r\n return dict((k.replace('.', ':').replace(':', '.', 1),\r\n six.text_type(v))\r\n for k, v in utils.recursive_keypairs(metadata,\r\n separator='.')\r\n if type(v) is not set)\r\n return {}", "def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n items = dict(items)\n # remove info like PCA primitive ID\n items_not_strings = {k: v for k, v in items.items() if type(v) != str}\n return dict(items_not_strings)", "def flatten_dict(\n d, parent_key=\"\", sep=\".\", ignore_under_prefixed=True, mark_value=True\n):\n items = {}\n for k in d:\n if ignore_under_prefixed and k.startswith(\"__\"):\n continue\n v = d[k]\n if mark_value and k.startswith(\"_\") and not k.startswith(\"__\"):\n v = MarkValue(repr(v))\n\n new_key = sep.join((parent_key, k)) if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.update(\n flatten_dict(\n v, new_key, sep=sep, ignore_under_prefixed=True, mark_value=True\n )\n )\n else:\n items[new_key] = v\n\n return items", "def dict(self, keys) -> dict:\n return {k: self(k) for k in keys}", "def builddict(fname,ignorestrings=['#'],dictdelim='='):\n\tf = open(fname, \"r\")\n\tline = f.readline()\n\ti = 0\n\t\n\tparamdict={}\n\twhile line != '':\n\t\ttmp = line.strip()\n\t\tif tmp :\n\t\t\tfor st in ignorestrings:\n\t\t\t\ttmp = tmp.split(st)[0]\n\t\t\t\tif len(tmp) >1:\n\t\t\t\t\ttp = tmp.split(dictdelim)\n\t\t\t\t\tkey = tp[0].strip()\n\t\t\t\t\tval = 
tp[1].strip()\n\t\t\t\t\tparamdict[str(key)] = str(val) \n\t\tline=f.readline()\n\t\n\tf.close()\n\treturn paramdict", "def key_dict(from_dict):\n new_dict = {}\n old2new = {}\n new2old = {}\n for key in from_dict:\n k = normalizeUnicode(key, 'identifier')\n if k != key:\n i = ''\n while \"%s%s\" % (k, i) in new_dict:\n if not i:\n i = 1\n else:\n i += 1\n k = \"%s%s\" % (k, i)\n old2new[key] = k\n new2old[k] = key\n new_dict[k] = from_dict[key]\n return (list(new_dict.keys()), new_dict, old2new, new2old)", "def _prefixed_items_from_dict(values: Dict[str, namedtuple], item_prefix, prefix, tag_names: Set[str] = set([]),\n cumulative=False):\n result = {}\n for key, nt in values.items():\n item_key = \"%s%s\" % (item_prefix, key)\n item = _parse(nt, prefix, tag_names)\n if cumulative:\n item = _cumulative_diff(item, item_key)\n result[item_key] = item\n return result", "def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to that last level, and key to\n # the final key in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base", "def _prefixed(nt: namedtuple, prefix):\n result = {}\n for key, value in nt._asdict().items():\n result[prefix + key] = value\n return result", "def all_key_seqs(template):\n result = []\n for k, v in template.iteritems():\n if isinstance(v, dict):\n for suffix in all_key_seqs(v):\n result.append([k] + suffix)\n else:\n result.append([k])\n return result", "def create_schema_dict_from_fieldnames(fieldnames):\n schema_dict = {}\n keys_list_info = []\n '''Intermediate list to keep track of the level in the key list that is being processed'''\n for field in fieldnames:\n keys = field.split('.')\n keys_list_info.append({\n 'keys': keys,\n 'level': 0\n })\n\n '''Loops over all the keys in list one level at a time.'''\n while True:\n processed_keys = 0\n for row in range(len(keys_list_info)):\n keys_info = keys_list_info[row]\n if keys_info.get('level') < len(keys_info['keys']):\n schema_dict = add_or_update_key_in_dict(schema_dict, keys_info['keys'], level=keys_info.get('level'))\n processed_keys += 1\n keys_info['level'] += 1\n else:\n continue\n\n if not processed_keys:\n break\n\n return schema_dict", "def _convert_flat_to_nested_cols(cls, dic, separator='.'):\n for key in list(dic.keys()):\n if separator in key:\n new_key, nested_key = key.split(separator, 1)\n new_value = dic.get(new_key, {})\n new_value = {} if new_value in [None, np.nan, 'nan'] else new_value\n new_value[nested_key] = dic[key]\n dic.pop(key, None)\n new_value = cls._convert_flat_to_nested_cols(\n new_value, separator\n )\n dic[new_key] = new_value\n return dic", "def create_dicts_by_chain(keys_chain: list):\n result = {}\n current_dict = result\n for key in keys_chain:\n current_dict[key] = {}\n current_dict = current_dict[key]\n return result", "def gen_dict(keys, vals):\n retVal = {}\n for i in range(len(keys)):\n if i > len(vals):\n retVal[keys[i]] = \"\"\n continue\n retVal[keys[i]] = vals[i]\n return retVal", "def abridge_dict(schema, data):\n abridged_dict = {}\n for field in schema:\n value = 
get_dict_value(field.field_name, data)\n if value:\n keys = field.field_name.split('.')\n val = {keys.pop(-1): value}\n while len(keys):\n val = {keys.pop(-1): val}\n merge_dict(abridged_dict, val)\n return abridged_dict", "def expand_record(record, separator = '.'):\r\n result = {}\r\n for key, value in record.items():\r\n current = result\r\n path = key.split(separator)\r\n for part in path[:-1]:\r\n if part not in current:\r\n current[part] = {}\r\n current = current[part]\r\n current[path[-1]] = value\r\n return result", "def group(d: dict, *groups: str, delimiter=\"_\"):\n res = d.copy()\n for g in groups:\n if g in res:\n res[g] = {None: res[g]}\n for k in d:\n if type(k) is str and k.startswith(g + delimiter):\n if g in res:\n res[g][k.replace(g + delimiter, \"\")] = res[k]\n else:\n res[g] = {k.replace(g + delimiter, \"\"): res[k]}\n del res[k]\n return res", "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n words = text.split()\n d = {}\n current_word = '$'\n \n for next_word in words:\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n if next_word[-1] == '.' or next_word[-1] == '!' or next_word[-1] == '?':\n current_word = '$'\n else:\n current_word = next_word\n return d", "def dict_path(d, path, default=None):\n\n keys = path.split('.')\n rv = d\n\n try:\n for key in keys:\n rv = rv.get(key)\n except AttributeError:\n return default\n\n return rv", "def remove_dash_and_underscore_from_key(d): # type: ignore\n\n if not isinstance(d, (dict, list)):\n return d\n elif isinstance(d, list):\n return [\n value\n for value in (\n remove_dash_and_underscore_from_key(value) for value in d\n )\n ]\n else:\n return {\n pascal_case(key): remove_dash_and_underscore_from_key(value)\n for key, value in d.items()\n }", "def prefix_keys(my_dict: Dict[str, Any], prefix: str) -> Dict[str, Any]:\n return {'{}{}'.format(prefix, key): value\n for key, value in my_dict.items()}", "def _split_dict(self, d, splits):\r\n ret = []\r\n for split in splits:\r\n dict_split = defaultdict(list)\r\n for f in split:\r\n if f in d:\r\n dict_split[f] = d[f]\r\n ret.append(dict_split)\r\n return ret", "def buildDict(self, words):\n self.dict = collections.defaultdict(set)\n for word in words:\n for i in xrange(len(word)):\n self.dict[word[:i] + '*' + word[i+1:]].add(word[i])", "def to_dotdict(data):\n\n if isinstance(data, dict):\n\n return dotdict(data)\n elif isinstance(data, list):\n return list_to_dotdict(data)", "def _expand_keys(entities):\n keys = list(entities.keys())\n values = list(product(*[entities[k] for k in keys]))\n return [{k: v for k, v in zip(keys, combs)} for combs in values]", "def makeDict(self, s):\n out = {}\n entries = s.split(self.dataDelimiterEntry)\n for e in entries:\n if e == \"\":\n continue\n c = e.split(self.dataDelimiterKey)\n out[c[0]] = c[1]\n return out", "def deep_get(_dict, keys, default=None):\n keys=keys.split('.')\n def _reducer(d, key):\n if isinstance(d, dict):\n return d.get(key, default)\n return default\n return reduce(_reducer, keys, _dict)", "def package_dict_items():\n for idx, (key, py_subobj) in enumerate(py_obj.items()):\n # Obtain the raw string representation of this key\n key_base_type = key.__class__.__name__.encode(\"utf8\")\n if isinstance(key,str):\n if not _str_slashes.search(key):\n yield r'\"{}\"'.format(key),py_subobj,{'key_idx':idx,'key_base_type':key_base_type},kwargs\n continue\n elif isinstance(key,bytes):\n if not _byte_slashes.search(key):\n 
try:\n h_key = key.decode(\"utf8\")\n except UnicodeError: # pragma no cover\n pass\n else:\n yield r'b\"{}\"'.format(h_key),py_subobj,{'key_idx':idx,'key_base_type':key_base_type},kwargs\n continue\n elif key_base_type in dict_key_types_dict:\n h_key = \"{!r}\".format(key)\n if not _str_slashes.search(h_key):\n yield h_key,py_subobj,{'key_idx':idx,'key_base_type':key_base_type},kwargs\n continue\n sub_node_name = key_value_pair_name.format(idx)\n yield sub_node_name,(key,py_subobj),{'key_idx':idx,'key_base_type':b'key_value'},kwargs", "def _flatten_dict(self, d: Mapping[str, Any]) -> Dict[str, Any]:\n nested = {k for k, v in d.items() if isinstance(v, (Mapping, Configuration))}\n if self._lowercase:\n result = {\n k.lower() + \".\" + ki: vi\n for k in nested\n for ki, vi in self._flatten_dict(d[k]).items()\n }\n result.update(\n (k.lower(), v)\n for k, v in d.items()\n if not isinstance(v, (Mapping, Configuration))\n )\n else:\n result = {\n k + \".\" + ki: vi\n for k in nested\n for ki, vi in self._flatten_dict(d[k]).items()\n }\n result.update(\n (k, v)\n for k, v in d.items()\n if not isinstance(v, (Mapping, Configuration))\n )\n return result", "def _to_bucket_dict(d: dict) -> dict:\n r = {}\n for attr in d:\n r[attr], t = {}, d[attr]\n for key in t:\n val, key = t[key], key.split(';')\n new_key = (key[0], key[1], key[2])\n r[attr][new_key] = val\n return r", "def secondary_keys_dicts(self):", "def flatten(data, delim='_'):\n result = {}\n\n def flatten_dict(keys, name=''):\n if isinstance(keys, collections.MutableMapping):\n for value in keys:\n flatten_dict(keys[value], \"{}{}{}\".format(name, value, delim))\n elif isinstance(keys, list):\n count = 0\n for value in keys:\n if isinstance(value, collections.MutableMapping):\n flatten_dict(value, \"{}{}{}\".format(name, count, delim))\n else:\n result[name[:-1]] = keys\n count += 1\n else:\n result[name[:-1]] = keys\n\n flatten_dict(data)\n return result", "def flatten_dict(base, v, d):\n if base != '':\n base = base + \".\"\n for k in d:\n if type(d[k]) in (type('a'), type(u'a'), type(1), type(1.1), type(False), type(None)):\n v[base + k] = d[k]\n elif type(d[k]) in (type([]), type((1,2))):\n v[base + k] = \", \".join(d[k])\n elif type(d[k]) == type({}):\n flatten_dict(base + k, v, d[k])\n else:\n print \"huh,\", type(d[k])", "def _get_keys(self, ckey):\n if self.has_key(ckey):\n doc = self[ckey]\n else:\n doc = [o for o in self.get_values(ckey)]\n if isinstance(doc, dict):\n for key in doc.keys():\n if ckey.rfind('%s.' % key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n else:\n yield ckey\n elif isinstance(doc, list):\n for item in doc:\n if isinstance(item, dict):\n for key in item.keys():\n if ckey.rfind('%s.' 
% key) == -1:\n combo = '%s.%s' % (ckey, key)\n yield combo\n vals = [v for v in self.get_values(combo)]\n for kkk in helper_loop(combo, vals):\n yield kkk\n elif isinstance(item, list):\n for elem in item:\n if isinstance(elem, dict):\n for kkk in elem.keys():\n yield '%s.%s' % (ckey, kkk)\n else:\n yield ckey\n else: # basic type, so we reach the end\n yield ckey\n else: # basic type, so we reach the end\n yield ckey", "def _test_config_generator_quoted_key():\n class Inner(config.Config):\n a = config.option(int, default=1, help=\"\")\n b = config.option_map(int, default={\"?\": 1, \"#\": 2, \"[\": 3}, help=\"\")\n\n class Config(config.Config):\n a = config.option_map(Inner, default={\"?\": Inner(), \"#\": Inner(), \"[\": Inner()}, help=\"\")\n b = config.option_map(int, default={\"?\": 1, \"#\": 2, \"[\": 3}, help=\"\")\n\n return Config, [\n # Simple map of Config.b first\n 'b.\"?\" = 1',\n 'b.\"#\" = 2',\n 'b.\"[\" = 3',\n # Structure map of Config.a starts\n '[a.\"?\"]',\n 'a = 1',\n 'b.\"?\" = 1',\n 'b.\"#\" = 2',\n 'b.\"[\" = 3',\n '[a.\"#\"]',\n 'a = 1',\n 'b.\"?\" = 1',\n 'b.\"#\" = 2',\n 'b.\"[\" = 3',\n '[a.\"[\"]',\n 'a = 1',\n 'b.\"?\" = 1',\n 'b.\"#\" = 2',\n 'b.\"[\" = 3',\n ]", "def fluff(src, sep=\".\"):\n return reduce(lambda x, y: _dmerge(x, y), [hdict(k, v, sep) for k, v in src.items()], {})", "def namedtuples2dicts(namedtuples):\n return {k: dict(v._asdict()) for k, v in namedtuples.items()}", "def get_many(self, keys: Iterable, version: Optional[int] = None) -> Dict[str, Any]:\n d = {}\n for k in keys:\n val = self.get(k, version=version)\n if val is not None:\n d[k] = val\n return d", "def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict", "def flatten_entrypoints(ep):\n def _flatten(d, prefix):\n d1 = {}\n for k, v in d.items():\n if isinstance(v, dict):\n yield from _flatten(v, prefix+'.'+k)\n else:\n d1[k] = v\n\n if d1:\n yield prefix, d1\n\n res = {}\n for k, v in ep.items():\n res.update(_flatten(v, k))\n return res", "def allkeys(self, as_str=False):\n for key in self.__allkeys((\"__ROOT__\",), {\"__ROOT__\": self}):\n yield \".\".join(key) if as_str else key", "def keys_nested(self, subkeys_as='list'):\n keys = []\n for k, v in sorted(self.items()):\n if isinstance(v, AttrDict) or isinstance(v, dict):\n if subkeys_as == 'list':\n keys.extend([k + '.' 
+ kk for kk in v.keys_nested()])\n elif subkeys_as == 'dict':\n keys.append({k: v.keys_nested(subkeys_as=subkeys_as)})\n else:\n keys.append(k)\n return keys", "def insertable_dict(self):\n # .strip('_') is for type_\n return {\n 'f_' +\n p.key.strip('_'): getattr(\n self,\n p.key) for p in self.__mapper__.attrs}", "def build_flattened_key(prefix, key):\n return key if not prefix else prefix + \".\" + key", "def dict_keys(self, keys):\n return _(_dict(keys, self._))", "def sorting_dict(self):\n ### take length of key and write in new dictionary repaired number without dots\n d_rekey = dict()\n l_rekey = list()\n # take max level of hash\n level = self.take_max_level()\n for k,v in self.d.items():\n l_key = k.split(\":\")\n delta = level - len(l_key)\n new_key = (\"\".join(l_key) + str(0)*delta)[1:]\n d_rekey[new_key] = k\n l_rekey.append(int(new_key))\n l_rekey.sort()\n return l_rekey, d_rekey", "def dict(self, exclude=None, keys=None):\n\t\tdictionary = self.__dict__\n\n\t\t# Return immediately if the user only wants certain keys\n\t\tif keys:\n\t\t\tdictionary = {i: dictionary[i] for i in keys if i in dictionary}\n\t\t\treturn dictionary\n\n\t\tif exclude:\n\t\t\tdictionary = {key: dictionary[key] for key, _ in dictionary.items() if key not in exclude}\n\n\t\tdictionary = {key: dictionary[key] for key, _ in dictionary.items() if not key.startswith('_')}\n\t\treturn dictionary", "def _split_by_keypair(self, osw_dict={}): \n lst = osw_dict\n keypair_dict = []\n for d in lst:\n if d['key'] == 'raw_line':\n keypair_lst = re.split(r',',d['value'])\n \n for k,v in keypair_lst:\n _d = [{'timestamp':d['timestamp'] , \n 'category': d['category'], \n 'sub_category': d['sub_category'], \n 'key': k, \n 'value': v}]\n keypair_dict.extend(_d)\n \n return keypair_dict", "def prepend_all_keys(d, char=\"@\", ignore_keys=frozenset()):\n if isinstance(d, dict):\n for key in list(d.keys()):\n if key in ignore_keys:\n continue\n if isinstance(d[key], list) or isinstance(d[key], dict):\n prepend_all_keys(d[key], char=char, ignore_keys=ignore_keys)\n else:\n new_key = char + key\n d[new_key] = d[key]\n del d[key]\n prepend_all_keys(d[new_key], char=char, ignore_keys=ignore_keys)\n elif isinstance(d, list):\n for item in d:\n prepend_all_keys(item, char=char, ignore_keys=ignore_keys)", "def turn_keys_into_str(d):\n return { str(k) : v for k,v in d.items() }", "def _namespace_to_dict_util(n):\n if not isinstance(n, SimpleNamespace):\n return n\n\n ret = {}\n for k, v in vars(n).items():\n ret[k] = _namespace_to_dict_util(v)\n\n return ret", "def path_to_dict(path):\n global path_dict\n path_dict.overwrite({\n 'path': [sorted(x) for x in pairwise(path)],\n 'endpoints': (path[0], path[-1])\n })", "def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls", "def _pathlist(self, key, arr):\n params = {}\n i = 0\n for value in arr:\n i += 1\n params[\"%s.%s\" % (key, i)] = value\n return params", "def build_dict(self, d):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n colon = self.art_type([self.string_type(':')], baseline=0)\n def concat_no_breakpoint(k,v):\n k = self.build(k)\n v = self.build(v)\n elt = k + colon + v\n elt._breakpoints.remove(k._l)\n elt._breakpoints.remove(k._l + 1)\n return elt\n repr_elems = self.concatenate(\n (concat_no_breakpoint(k,v) for k,v in d.iteritems()),\n comma)\n return self.build_container(repr_elems,\n 
self.left_curly_brace, self.right_curly_brace)", "def convert(data):\n return {k: [d[k] for d in data] for k in data[0].keys()}", "def to_dict(self):\n return {key: getattr(self, key) for key in self.keys}", "def _convertToDict(self, parsed):\r\n d = dict()\r\n itp = iter(parsed)\r\n for pp in itp:\r\n if not isinstance(pp, list):\r\n if pp.find(';') == -1:\r\n # if not a list and doesn't include ';' it's a key and\r\n # next item is the value\r\n d[pp.strip()] = self._convertToDict(next(itp))\r\n else:\r\n s = pp.split(';')\r\n if not pp.endswith(';'):\r\n # last item is a key and next item is the value\r\n d[s[-1].strip()] = self._convertToDict(next(itp))\r\n s = s[:-1]\r\n for ppp in s:\r\n ss = ppp.split()\r\n if ss:\r\n d[ss[0].strip()] = ' '.join(ss[1:]).strip()\r\n return d", "def keysToString(indict):\r\n newD = {}\r\n for k, v in indict.iteritems():\r\n newD[k.name] = v\r\n return newD", "def shorten_keys(params):\n\tparam_names = {}\n\tfor n in params:\n\t\tparts = n.split('_')\n\t\tfirsts = [p[0] for p in parts]\n\t\tparam_names[n] = ''.join(firsts)\n\treturn param_names", "def get_dict(key):\r\n name = f\"{key}_dict\"\r\n return eval(name)", "def create_counterparty_dict(file_name) -> Dict[str, str]:\n dct = {}\n with open(file_name) as f:\n root_dir = f.readline().strip('\\n')\n for line in f:\n key, val = line.strip('\\n').split('!!!!')\n temp = val.split('==')\n d = {'path': root_dir + temp[0], 'to': temp[1:]}\n dct[key] = d\n return dct", "def insertable_dict(self):\n\n d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('table', 'stats', '_codes')}\n\n x = {('c_' + k).strip('_'): v for k, v in d.items()}\n\n return x", "def configFromKeysString(self, string=\"\"):\n keys = string.split('.')\n return self.configFromKeys(self.namespace[keys[0]], keys[1:])", "def flatten(d, path):\n\n if isinstance(d, dict):\n for k, v in d.items():\n yield from flatten(v, path + [k])\n else:\n yield (\".\".join(path), d)", "def potcar_str2dict(potcar_list: Optional[str]) -> dict:\n if potcar_list is None:\n return {}\n elif isinstance(potcar_list, str):\n potcar_list = potcar_list.split()\\\n\n d = {}\n for p in potcar_list:\n element = p.split(\"_\")[0]\n d[element] = p\n return d", "def to_dict(self):\n\n return DotDict(self.__iter__())", "def get_walked_obj(in_obj):\n out_obj = {}\n for paths, obj in objwalk(in_obj):\n out_obj['.'.join([str(path) for path in paths])] = obj\n\n part_key = '.'.join([str(path) for path in paths[:-1]])\n for key, value in [\n ('.'.join([part_key, 'KEYS']), paths[-1]),\n ('.'.join([part_key, 'VALUES']), obj)]:\n if key not in out_obj:\n out_obj[key] = []\n out_obj[key].append(value)\n\n return out_obj", "def create_model_owc(text: str) -> Dict[str, Set[str]]:\n dict_so_far = {}\n list_of_words = str.split(text)\n\n\n for x in range(0, len(list_of_words)):\n \"\"\"\n check if the word is followed by a period and add it to the follow list if it is, then remove the period to \n check if the word is followed by something else\n \"\"\"\n if list_of_words[x][-1] == '.':\n list_of_words[x] = list_of_words[x][0:-1]\n update_follow_set(dict_so_far, list_of_words[x], '.')\n\n else:\n update_follow_set(dict_so_far, list_of_words[x], list_of_words[x + 1].rstrip('.'))\n return dict_so_far", "def get(self, ckey, default=None):\n obj = default\n keys = ckey.split('.')\n first = keys[0]\n if self.has_key(first):\n obj = super(DotDict, self).__getitem__(first)\n if first == ckey:\n if isinstance(obj, dict):\n return DotDict(obj)\n else:\n return 
obj\n if isdictinstance(obj):\n return DotDict(obj).get('.'.join(keys[1:]))\n elif isinstance(obj, list):\n for elem in obj:\n if isdictinstance(elem):\n newobj = elem.get('.'.join(keys[1:]))\n if newobj:\n if isinstance(newobj, dict):\n return DotDict(newobj)\n return newobj\n return obj", "def filter_paths(paths : dict, spec : str) -> dict:\n all_paths = defaultdict(dict)\n for mag in paths.keys():\n specs = get_specs(mag, spec)\n paths_restructured = defaultdict(list)\n for path in paths[mag]:\n for s in specs:\n if s in path:\n paths_restructured[s].append(path)\n all_paths[mag] = paths_restructured\n return all_paths", "def test_references(self):\n a = DummyObject()\n d = {'a.a.a':1, 'a.b.a':3, 'b':a}\n # Check dict single level keys don't lose reference\n self.assertEqual( dottedDict(d).data['b'], d['b'] )\n self.assertEqual( dottedDict(d).data, dottedDict(dottedDict(d)).data )", "def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def dotted_prefixes(dotted_name, reverse=False):\n name_parts = dotted_name.split(\".\")\n if reverse:\n idxes = range(len(name_parts), 0, -1)\n else:\n idxes = range(1, len(name_parts)+1)\n result = ['.'.join(name_parts[:i]) or '.' for i in idxes]\n return result" ]
[ "0.73150593", "0.666839", "0.6528275", "0.647951", "0.6416033", "0.639155", "0.6269472", "0.6193029", "0.6132605", "0.6059191", "0.5956652", "0.59102404", "0.5872739", "0.5800979", "0.57487774", "0.5689122", "0.5673279", "0.562703", "0.5615472", "0.5609791", "0.56088036", "0.5602017", "0.5595142", "0.5573848", "0.5544635", "0.55432194", "0.5530819", "0.55306786", "0.5470455", "0.5464332", "0.5438511", "0.5393016", "0.53885716", "0.5361871", "0.5356645", "0.5354962", "0.53534913", "0.5335455", "0.5331355", "0.53138834", "0.5312626", "0.53076965", "0.53059393", "0.52821714", "0.52388746", "0.522919", "0.52217674", "0.5206735", "0.51986843", "0.5178168", "0.5176577", "0.51755995", "0.51738834", "0.51707786", "0.51671284", "0.5165182", "0.51649886", "0.51638085", "0.5161553", "0.5154695", "0.5152954", "0.51303655", "0.5123491", "0.5122538", "0.51218516", "0.51174885", "0.5115927", "0.511515", "0.5114017", "0.50999844", "0.50854915", "0.508545", "0.50850785", "0.5081562", "0.5079252", "0.5068603", "0.5066118", "0.5063887", "0.50632864", "0.50617427", "0.5061051", "0.50609195", "0.50557154", "0.50505674", "0.50459343", "0.5044256", "0.504376", "0.5042221", "0.5026848", "0.50236577", "0.5021625", "0.5017545", "0.501495", "0.5009106", "0.50010955", "0.499999", "0.4994949", "0.49938384", "0.49811086", "0.49807268" ]
0.73592526
0
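A minimal usage sketch for the make_tree document above, assuming that function is in scope; the dot-separated keys are hypothetical examples. Every leaf ends in an empty dict because no values are associated with the keys.

keys = ["a.b.c", "a.b.d", "x.y"]

tree = make_tree(keys)
print(tree)  # {'a': {'b': {'c': {}, 'd': {}}}, 'x': {'y': {}}}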
get from nested dict
def get_from_dict(data_dict, map_list):
    return reduce(lambda d, k: d[k], map_list, data_dict)
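A minimal usage sketch for get_from_dict, assuming Python 3, where reduce must be imported from functools (the record itself does not show that import); the nested dict and key path are hypothetical examples.

from functools import reduce

data = {"a": {"b": {"c": 42}}}
print(get_from_dict(data, ["a", "b", "c"]))  # 42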
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_nested(nested_dict, field):\n print(nested_dict, field)\n keys = field.split('.')\n current = nested_dict\n for k in keys:\n print('key', k, 'current', current)\n # return None for nested fields without a value in this doc\n if isinstance(current, list):\n # this list could contain anything. skip objects not containing `k`.\n return [x[k] for x in current if x.get(k) is not None]\n if not k in current:\n current = None\n break\n current = current[k]\n return current", "def _get_from_nest(nest, path):\n if not path or not nest:\n return nest\n return _get_from_nest(nest.get(path[0], None), path[1:])", "def get_deep(tree, path):\n for key in path[:-1]:\n tree = tree.get(key, {})\n return tree.get(path[-1])", "def nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n if isinstance(value, (list, tuple)):\n # assuming we have a list of dict with k as one of the keys\n stype = set([type(e) for e in value])\n if not stype:\n return None\n assert len(stype) == 1 and stype == {dict}, \"Expecting a list of dict, found types: %s\" % stype\n value = [e[k] for e in value if e.get(k)]\n # can't go further ?\n return value\n else:\n value = value[k]\n except KeyError:\n return None\n\n return value", "def nested_get(input_dict, nested_key):\n internal_dict_value = input_dict\n for k in nested_key:\n internal_dict_value = internal_dict_value.get(k, None)\n if internal_dict_value is None:\n return(None)\n return(internal_dict_value)", "def nested_get(input_dict, nested_key):\n internal_dict_value = input_dict\n for k in nested_key:\n internal_dict_value = internal_dict_value.get(k, None)\n if internal_dict_value is None:\n return(None)\n return(internal_dict_value)", "def nested_get(dictionary: dict, keys: list):\n nested_dict = dictionary\n for key in keys[:-1]:\n nested_dict = nested_dict[key]\n return nested_dict.get(keys[-1])", "def _try_get(nested_dict, dict_keys):\n try:\n for dict_key in dict_keys:\n nested_dict = nested_dict.__getitem__(dict_key)\n return nested_dict\n except:\n return ''", "def _get_nested_value(dct, key_path):\n key = key_path[0]\n if not isinstance(dct, dict):\n raise errors.AnsibleFilterError(\n f\"stringfilter: looking for key '{key}' \"\n f\"but list item is not dict: {pformat(dct)}\"\n )\n if key not in dct:\n raise errors.AnsibleFilterError(\n f\"stringfilter: key is '{key}' \"\n f\"but it was not found in dict: {pformat(dct)}\"\n )\n value = dct[key]\n if len(key_path) > 1:\n return _get_nested_value(value, key_path[1:])\n else:\n return value", "def deepget(self, key):\n if DEBUG: print(repr(self))\n if '.' 
in key:\n top, rest = key.split('.', 1)\n #if DEBUG: print(top, rest)\n return self[top].deepget(rest)\n else:\n return self[key]", "def nested_get(\n d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True\n) -> t.Optional[t.Any]:\n for name, key in path:\n d = d.get(key) # type: ignore\n if d is None:\n if raise_on_missing:\n name = \"table\" if name == \"this\" else name\n raise ValueError(f\"Unknown {name}: {key}\")\n return None\n\n return d", "def get_by_path(data: Dict[str, T], path: Sequence[str]) -> T:\n return reduce(operator.getitem, path, data)", "def find_in_dict(obj, key):\n if key in obj:\n return obj[key]\n for k, v in obj.items():\n if isinstance(v,dict):\n item = find_in_dict(v, key)\n if item is not None:\n return item", "def _get_nested_value(dictionary, nested_path):\n replacement_value_path = nested_path.split(\"/\")\n replacement_value = copy.deepcopy(dictionary)\n\n for item in replacement_value_path:\n replacement_value = replacement_value.get(item, {})\n\n if replacement_value == {}:\n replacement_value = \"\"\n\n return replacement_value", "def get(obj, path):\n right = path\n cur = obj\n while right:\n left, right = partition(right)\n if isinstance(cur, dict):\n cur = cur.get(left)\n elif isinstance(cur, (list, tuple)):\n left = int(left)\n cur = cur[left] if left < len(cur) else None\n return cur", "def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el", "def findItem(obj, key):\n if key in obj:\n return obj[key]\n if type(obj) == str:\n return None\n for k, v in obj.items():\n if isinstance(v, dict):\n item = findItem(v, key)\n if item is not None:\n return item\n elif isinstance(v, list):\n for list_item in v:\n item = findItem(list_item, key)\n if item is not None:\n return item", "def _get_value(self, main_key, sub_key):\n # pylint: disable=no-else-return\n if main_key in self.json_dict:\n if sub_key in self.json_dict[main_key]:\n return self.json_dict[main_key][sub_key]\n else:\n print(\n 'Sub key \"%s\" not in main key \"%s\"' %\n (sub_key, main_key))\n return None\n else:\n print('Main key \"%s\" not in JSON dict' % main_key)\n return None", "def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)", "def get(self, ckey, default=None):\n obj = default\n keys = ckey.split('.')\n first = keys[0]\n if self.has_key(first):\n obj = super(DotDict, self).__getitem__(first)\n if first == ckey:\n if isinstance(obj, dict):\n return DotDict(obj)\n else:\n return obj\n if isdictinstance(obj):\n return DotDict(obj).get('.'.join(keys[1:]))\n elif isinstance(obj, list):\n for elem in obj:\n if isdictinstance(elem):\n newobj = elem.get('.'.join(keys[1:]))\n if newobj:\n if isinstance(newobj, dict):\n return DotDict(newobj)\n return newobj\n return obj", "def get(self, key: str):\r\n\r\n if key in self._inner_dict:\r\n return self._inner_dict[key]\r\n else:\r\n raise KeyError(f\"key '{key}' is invalid\")", "def deep_get(_dict, keys, default=None):\n keys = keys.split('.')\n\n def _reducer(d, key):\n if isinstance(d, dict):\n return d.get(key, default)\n return default\n return reduce(_reducer, keys, _dict)", "def get_element(d, path): # type: (Dict, Tuple) -> Any\n if len(path) == 0:\n raise ValueError('Path length cant be 0')\n elif len(path) == 1:\n return d.get(path[0])\n elif 
d.get(path[0]):\n return DictUtil.get_element(d[path[0]], path[1:])\n return None", "def deep_get(_dict, keys, default=None):\n keys=keys.split('.')\n def _reducer(d, key):\n if isinstance(d, dict):\n return d.get(key, default)\n return default\n return reduce(_reducer, keys, _dict)", "def extract_values(obj, key): \n arr = [] \n def extract(obj, arr, key): \n \"\"\"Recursively search for values of key in JSON tree.\"\"\" \n if isinstance(obj, dict): \n for k, v in obj.items(): \n if isinstance(v, (dict, list)): \n extract(v, arr, key) \n elif k == key: \n arr.append(v) \n elif isinstance(obj, list): \n for item in obj: \n extract(item, arr, key) \n return arr \n results = extract(obj, arr, key) \n return results", "def _access_dict(self, d, key):\n try:\n # try to get access to the value by using the key\n value = d[key]\n return value\n except:\n # fail to access the value from the key\n # namely, the feature does not exist in the \n # feature dictionary of a specific apartment\n return None", "def get_nested_dict_entry_from_namespace_path(d, namespace_path):\n # Try to split off the namespace path into the first key and the rest of the keys\n split_namespace_path = namespace_path.split('.', 1)\n if len(split_namespace_path) == 1:\n # Only one key for a non-nested dict; return the result\n return d[split_namespace_path[0]]\n else:\n cur_key, path_remainder = split_namespace_path\n return get_nested_dict_entry_from_namespace_path(d[cur_key], path_remainder)", "def search_value(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default", "def search(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default", "def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]", "def _get(self, key):\n current_storage_dict = self._storage\n sub_keys = key.split('.')\n i = 1\n sub_keys_count = len(sub_keys)\n for sub_key in sub_keys:\n if i < sub_keys_count:\n if sub_key in current_storage_dict:\n current_storage_dict = current_storage_dict[sub_key]\n else:\n return\n\n else:\n if sub_key in current_storage_dict:\n return current_storage_dict[sub_key]\n else:\n return\n\n i += 1", "def _get_from_dict(map, attrs):\n for a in attrs:\n try:\n map = map[a]\n except KeyError:\n return None\n return map", "def do_get(d, *ks, **kwargs):\n try:\n res = reduce (lambda acc, k: acc[k], ks, d)\n except (KeyError, TypeError):\n if \"default\" in kwargs:\n return kwargs[\"default\"]\n else:\n t, v, tb = sys.exc_info()\n if t == KeyError:\n msg = \"nested keys {} not found in {}\".format(ks, d)\n else:\n msg = \"nesting of keys {} too is too deep for {}\".format(ks, d)\n raise KeyError, msg, tb\n else:\n return res", "def search_key(cls, key, element):\n if isinstance(element, dict):\n for k, v in element.items():\n if k == key:\n return v\n elif isinstance(v, dict):\n cls.search_key(key, v)\n elif isinstance(v, list):\n cls.search_key(key, v)\n elif isinstance(element, list):\n for obj in element:\n v = cls.search_key(key, obj)\n if v:\n 
return v", "def gen_dict_extract(key, var):\n if hasattr(var,'items'):\n for k, v in var.items():\n if k == key:\n yield v\n if isinstance(v, dict):\n for result in gen_dict_extract(key, v):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in gen_dict_extract(key, d):\n yield result", "def get_field(key, obj):\n\n val = obj\n\n for subkey in key.split('.'):\n val = val[subkey]\n\n return val", "def find_value_for_nested_key(mapping, key_of_interest, tree=[]):\n original_mapping = mapping\n logger.debug(\"Looking for key %s\", key_of_interest)\n logging.debug(\"Looking in %s\", mapping)\n logger.debug(\"Using tree %s\", tree)\n if tree:\n for leaf in tree:\n mapping = mapping[leaf]\n else:\n tree = [None]\n for leaf in reversed(tree):\n logging.debug(\"Looking in bottommost leaf %s\", leaf)\n for key, value in six.iteritems(mapping):\n if key == key_of_interest:\n return value\n if leaf:\n find_value_in_nested_key(original_mapping, key_of_interest, tree[:-1])\n warnings.warn(\"Couldn't find value for key %s\" % key_of_interest)\n # raise KeyError(\"Couldn't find value for key %s\", key_of_interest)", "def get_by_dot_path(dictionary: Dict, key_path: str) -> Any:\n return get_by_list_of_keys(dictionary, key_path.split(\".\"))", "def get_safe(dict_instance, keypath, default=None):\n try:\n obj = dict_instance\n keylist = keypath if type(keypath) is list else keypath.split('.')\n for key in keylist:\n obj = obj[key]\n return obj\n except Exception, ex:\n return default", "def getProperty(self, child, key):\n\n # First get the child's dictionary\n childDict = self.getInfoDict(child)\n if childDict:\n return childDict.get(key, None)", "def get_by_path(root, path):\n \n sub_data = root\n for key in path:\n sub_data = sub_data[key]\n \n return sub_data", "def __getitem__(self, key):\n return self._root.__getitem__(key)", "def get(key):\n return current().values[key]", "def getattr_nested(obj, idxs):\n if len(idxs) == 0:\n return obj\n\n idx = idxs.pop(0)\n\n if isinstance(obj, dict):\n if idx in obj:\n return getattr_nested(obj[idx], idxs)\n elif isinstance(obj, (list, tuple)) and isinstance(idx, int):\n if idx < len(obj):\n return getattr_nested(obj[idx], idxs)\n else:\n return getattr_nested(getattr(obj, idx))", "def traverse_dict(dic, entry_list):\n length = len(entry_list)\n if length > 0:\n element = entry_list[0]\n if isinstance(dic, dict) and length > 1 and element in dic:\n return traverse_dict(dic[element], entry_list[1:])\n elif isinstance(dic, dict) and length == 1 and element in dic:\n return dic[element]\n return None", "def _strict_get_value(item: JsonObject, *keys: str) -> Any:\n try:\n val = item\n for key in keys:\n if isinstance(val, dict):\n val = val[key]\n else:\n raise KeyError(f\"Access path {keys} leads to a non-dict object.\")\n except KeyError:\n raise KeyError(f\"Key '{keys}' does not exist in all items. 
Try 'strict=False'.\")\n else:\n return val", "def dict_search_recursive(d, k):\n # FIXME: make it generic recursive search over nested graphs and move to smp_base\n\n # print \"#\" * 80\n # print \"searching k = %s \" % (k,),\n if k in d:\n # print \"found k = %s, params = %s\" % (k, d[k]['params'].keys())\n return d[k]\n else:\n # print \"d.keys()\", d.keys()\n for k_, v_ in list(d.items()):\n # if v_[\n if 'graph' in v_['params']: # or v_['params'].has_key('subgraph'):\n # print \"k_\", k_, \"v_\", v_['params'].keys()\n return dict_search_recursive(v_['params']['graph'], k)\n # None found\n return None", "def extract(obj, arr, key): \n if isinstance(obj, dict): \n for k, v in obj.items(): \n if isinstance(v, (dict, list)): \n extract(v, arr, key) \n elif k == key: \n arr.append(v) \n elif isinstance(obj, list): \n for item in obj: \n extract(item, arr, key) \n return arr", "def get_deep(config, key_seq):\n if 1 == len(key_seq):\n return config[key_seq[0]]\n else:\n return get_deep(config[key_seq[0]], key_seq[1:])", "def _get(d, *paths):\n if d is None:\n return None\n\n if paths is None:\n return None\n\n for path in paths:\n if path is None:\n return None\n\n path = path.split('.')\n for key in path:\n try:\n i = int(key)\n if i in d:\n return d[i]\n else:\n return None\n\n except BaseException:\n d = d.get(key, None)\n if d is None:\n return None\n\n return d", "def __getitem__(self, key):\n if not isinstance(key, str) or '.' not in key:\n return dict.__getitem__(self, key)\n obj, token = _descend(self, key)\n return _get(obj, token)", "def __getitem__(self, key):\n for k,v in list(self.__dict__.items()):\n if k == key:\n return v\n try:\n return v[key]\n except:\n pass\n\n print((\"Item %s could not be found...\" %key))", "def _get_item(dic: dict, keys: list) -> dict:\n\tfor key in keys:\n\t\tdic = dic[key]\n\n\treturn dic", "def get_key_recursive(key, config):\n if not isinstance(key, list):\n key = key.split(\"/\") # subdict indexing split using slash\n assert key[0] in config, f\"missing key '{key[0]}' in metadata dictionary: {config}\"\n val = config[key[0]]\n if isinstance(val, (dict, collections.OrderedDict)):\n assert len(key) > 1, \"missing keys to index metadata subdictionaries\"\n return get_key_recursive(key[1:], val)\n return int(val)", "def json_extract(obj, key):\n arr = []\n\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n values = extract(obj, arr, key)\n return values", "def extract_dict(d, f):\n if len(f) == 1:\n return extract_value(f[0], d)\n else:\n return extract_dict(d[f[0]], f[1:])", "def get(pb_or_dict, key, default=_SENTINEL):\n # We may need to get a nested key. 
Resolve this.\n key, subkey = _resolve_subkeys(key)\n\n # Attempt to get the value from the two types of objects we know baout.\n # If we get something else, complain.\n if isinstance(pb_or_dict, Message):\n answer = getattr(pb_or_dict, key, default)\n elif isinstance(pb_or_dict, collections.Mapping):\n answer = pb_or_dict.get(key, default)\n else:\n raise TypeError('Tried to fetch a key %s on an invalid object; '\n 'expected a dict or protobuf message.')\n\n # If the object we got back is our sentinel, raise KeyError; this is\n # a \"not found\" case.\n if answer is _SENTINEL:\n raise KeyError(key)\n\n # If a subkey exists, call this method recursively against the answer.\n if subkey and answer is not default:\n return get(answer, subkey, default=default)\n\n # Return the value.\n return answer", "def get_nested_property_from_data(data: Dict, property_key: str) -> Optional[Any]:\n if data is None or not isinstance(data, Dict):\n return None\n\n if property_key in data:\n return data[property_key]\n\n sep = \".\"\n splitted_key = property_key.split(sep)\n\n return get_nested_property_from_data(\n data.get(splitted_key[0]), sep.join(splitted_key[1:])\n )", "def extract_values(obj, key):\n arr = []\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n results = extract(obj, arr, key)\n return results", "def _resolve_dict_entry(self, doc_uri, main_doc, obj):\n # Interpret '$ref' key if present in obj\n if '$ref' in obj:\n result = self._load_ref(doc_uri, main_doc, obj['$ref'])\n else:\n result = self.dict_class()\n # Merge values from obj with result\n for k, v in obj.items():\n if k != '$ref':\n result[k] = self._resolve(doc_uri, main_doc, v)\n return result", "def get_in(self, key=None, default=None):\n if key is None:\n raise KeyError(\"'Dict' attribute key can't be empty\")\n key_list = key.strip().split('.')\n data = self\n size = len(key_list)\n for index, k in enumerate(key_list):\n data = data.get(k)\n if index < size-1 and not isinstance(data, dict):\n return default\n return data", "def getattr_nested(obj, name):\n dots = name.count('.')\n if dots == 0:\n return getattr(obj, name)\n else:\n first, rest = name.split('.', 1)\n return getattr_nested(getattr(obj, first), rest)", "def get_item(obj, key):\n val = None\n if obj and type(obj) == dict:\n val = obj.get(key)\n elif obj and hasattr(obj, key):\n val = getattr(obj, key)\n val = val or ''\n return val", "def getSpecific(self, keyword, key):", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n # if the key is in the dictionary\n if k == key:\n # the key points to a list eg, key is 'dataElements':\n # \"dataElements\" : [\n # { \"id\": \"UID1\", \"id\": \"UID2\", ... 
}\n # ]\n if isinstance(v, list):\n for item in v:\n arr.append(item[\"id\"]) if item[\"id\"] not in arr else arr\n # the key points to another dictionary eg, key is 'dataElement':\n # \"dataElement\" : { \"id\": UID }\n elif isinstance(v, dict):\n if 'id' in v:\n arr.append(v[\"id\"])\n # It is a dictionary but not containing the id\n # Fetch the key and keep on looking\n else:\n for _key in list(v.keys()):\n if isinstance(v[_key], dict):\n extract(v, arr, _key)\n # if it is not a list or a dict, we simply take the value eg, key is organisationUnit\n # \"organisationUnit\" : UID\n else:\n arr.append(v)\n # if key is not there but it is still a dict or a list,\n # call the extract function again to keep going down another level\n elif isinstance(v, (dict, list)):\n extract(v, arr, key)\n # if it is a list, loop each element and call the extract function\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def deep_get(nested_dict, keys, default=None):\n if not isinstance(nested_dict, dict):\n raise exception.SysinvException(_(\n \"Expected a dictionary, cannot get keys {}.\".format(keys)))\n\n def _reducer(d, key):\n if isinstance(d, dict):\n return d.get(key, default)\n return default\n\n return functools.reduce(_reducer, keys, nested_dict)", "def get_dictvalue_from_xpath(full_dict, path_string):\n\n\tkey_value = full_dict\n\n\tfor i in path_string.split('/')[1:] :\n\t\tkey_value = key_value[i]\n\n\treturn key_value", "def extract_values(obj, key):\n arr = []\n\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n results = extract(obj, arr, key)\n return results", "def extract_values(obj, key):\n arr = []\n\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n results = extract(obj, arr, key)\n return results", "def _get_nested_value(creds_json, env_var_path_list):\n if len(env_var_path_list) > 1:\n return _get_nested_value(creds_json.get(env_var_path_list[0], {}), env_var_path_list[1:])\n return creds_json.get(env_var_path_list[0])", "def get_value(obj, name):\n if isinstance(obj, dict):\n return obj.get(name)\n\n return getattr(obj, name, obj)", "def get_random_value_from_dict(d: dict):\n return d[get_random_key_from_dict(d)]", "def get_val_from_nested_object(in_obj, keys):\n\n # Initialize variables\n list_of_keys = keys.split(\"/\")\n temp = {}\n\n # Loop to get the values for all the keys in a given list sequentially\n for key in list_of_keys:\n\n if key == \"\":\n print(f\"[ERROR] : Blank key encountered in the input chain.\")\n return \"ERROR\"\n\n try:\n # Only for the first time\n if len(temp) == 0:\n temp = in_obj[key]\n\n else:\n temp = temp[key]\n\n except KeyError:\n # In case invalid / inexistent key is provided\n print(f\"[ERROR] :: No data found for input key '{keys}'\")\n return \"ERROR\"\n\n return temp", "def extract_values(obj, key):\n arr = []\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in 
obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n results = extract(obj, arr, key)\n return results", "def test_from_dict(self):\n cd = ConfigDict.from_dict({\n 'x': 1,\n 'y': {\n 'z': 2,\n 'w': [1,2, {'v': 22}]\n }\n })\n\n self.assertEquals(cd.x, 1)\n self.assertEquals(cd['x'], 1)\n self.assertEquals(cd.y.z, 2)\n self.assertEquals(cd['y']['z'], 2)\n self.assertEquals(cd.y.w[2].v, 22)\n self.assertEquals(cd['y']['w'][2]['v'], 22)", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def get_from_dict(d, k):\n try:\n return reduce(dict.get, k, d)\n except TypeError:\n # Value not found.\n return None", "def rget(dict_object, path_list):\n try:\n return reduce(lambda d, k: d[k], path_list, dict_object)\n except KeyError:\n return dict_object", "def _nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n value = value[k]\n except KeyError:\n return None\n\n return str(value)", "def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr", "def find_value(dic, key):\n return dic[key]", "def find_key(key, var):\n if hasattr(var, 'items'):\n for k, v in var.items():\n if k == key:\n yield v\n if isinstance(v, dict):\n for result in find_key(key, v):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in find_key(key, d):\n yield result", "def _raw(self,key):\n for d in self.__dicts:\n if key in d:\n return d._raw(key)\n raise KeyError(key)", "def nested_dict_walker(fn, coll):\n return walk_values_rec(iffy(is_mapping, fn), coll)", "def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)", "def _get_by_path(tree, keys):\n return reduce(getitem, keys, tree)", "def _get_config_val(self, config: dict, config_key: str):\n config_val = config.get(config_key, {})\n if isinstance(config_val, list) and isinstance(config_val[0], dict):\n config_val = dict(ChainMap(*config_val))\n return config_val", "def __getitem__(self, key):\n return 
self.graph.readExtendedAttributes(self.entityId, key)[0]", "def get_data(self,key=''):\n path = key.split('.')\n itm = self._root \n for ik,k in enumerate(path):\n child_found = False\n try: \n itm = itm[k]\n child_found = True\n except:\n try: \n itm = itm[int(k)]\n child_found = True\n except:\n longer_key = k\n for kk in path[ik+1:]:\n longer_key += '.'\n try: \n itm = itm[longer_key]\n child_found = True\n except: \n pass\n longer_key += kk\n try: \n itm = itm[longer_key]\n child_found = True\n except: \n pass\n if not child_found:\n raise KeyError(key)\n return itm", "def get_value_from_object(obj, key):\n if is_dict(obj):\n return obj.get(key)\n return getattr(obj, key, None)", "def _lookup_element(lst, key):\n if not lst:\n return {}\n for ele in lst:\n if not ele or not isinstance(ele, dict):\n continue\n if key in ele:\n return ele[key]\n return {}", "def get_value(self) -> Dict[str, any]:", "def select(self, key: str) -> 'MultiDictGetter':\n return MultiDictGetter(d for d in self.get(key=key) if isinstance(d, dict))", "def __getitem__(self, key):\n key_split = key.split('.')\n last_index = len(key_split) - 1\n current = self\n for i, k in enumerate(key_split):\n try:\n current = getattr(current, k)\n except KeyError:\n if i == last_index:\n raise\n temp_dict = DotDictWithAcquisition()\n temp_dict.__dict__['_parent'] = weakref.proxy(current)\n current = temp_dict\n return current", "def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)", "def get(self, key, inner_key):\n app.logger.info('Request to Update a map_object with key [%s]', key)\n # check_content_type('application/json')\n map_object = Map.find(key)\n if not map_object:\n raise NotFound(\"Map with key '{}' was not found.\".format(key))\n item = map_object.get_value_with_key(inner_key)\n # item = map_object.value.get(inner_key, None)\n if not item:\n # map_object.value[0]['key']\n raise NotFound(\"Item with key '{}' was not found in '{}' Map.\".format(inner_key, key))\n return item, status.HTTP_200_OK", "def _get_dict_model(cls, key, model, spec):\n try:\n return model[key]\n except KeyError:\n raise ObjectNotFoundError(path=spec[\"full_path\"])" ]
[ "0.7243689", "0.70014375", "0.68189776", "0.67736", "0.67402154", "0.67402154", "0.67254245", "0.66089696", "0.65266186", "0.650513", "0.65000695", "0.6375932", "0.63684773", "0.6340982", "0.6306014", "0.6139669", "0.60748714", "0.6065115", "0.60559213", "0.60281414", "0.60154724", "0.6005097", "0.59930015", "0.59698576", "0.5955901", "0.5949173", "0.59407425", "0.5939667", "0.5929992", "0.59277254", "0.59276587", "0.5915613", "0.59154755", "0.5907652", "0.5852286", "0.5845243", "0.58364844", "0.58329093", "0.582292", "0.5821545", "0.57996005", "0.57993084", "0.5799288", "0.5797902", "0.5791505", "0.57835126", "0.5779478", "0.5777167", "0.5762494", "0.5760677", "0.57598805", "0.57516205", "0.5750569", "0.57431954", "0.5742315", "0.57395643", "0.5738911", "0.57295376", "0.57274616", "0.5723333", "0.57136875", "0.5708895", "0.56952596", "0.5692184", "0.5674272", "0.5670217", "0.5667248", "0.56641567", "0.56641567", "0.5659682", "0.5648844", "0.56410086", "0.5640192", "0.5632718", "0.5632456", "0.56312984", "0.56312984", "0.56312984", "0.56312984", "0.56312984", "0.5615796", "0.56111825", "0.56091446", "0.5605216", "0.55999404", "0.5594544", "0.55914813", "0.55809206", "0.5578047", "0.5578047", "0.5566089", "0.5559235", "0.55560654", "0.55471647", "0.55349886", "0.55336887", "0.5528001", "0.5523301", "0.5518431", "0.5510618", "0.5506281" ]
0.0
-1
set in nested dict
def set_in_dict(data_dict, map_list, value): target = get_from_dict(data_dict, map_list[:-1]) if isinstance(target, dict): if len(target[map_list[-1]]) == 0 and isinstance(value, str): target[map_list[-1]] = value else: target[map_list[-1]]['value'] = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nested_set(dictionary: dict, keys: list, value):\n nested_dict = dictionary\n for key in keys[:-1]:\n nested_dict = nested_dict[key]\n nested_dict[keys[-1]] = value\n return dictionary", "def test_deep_set_create(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"K:L:M\", \"Q\")\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\"}},\n \"K\": {\"L\": {\"M\": \"Q\"}},\n },\n res,\n )", "def nested_set(data, keys, value):\n for key in keys[:-1]:\n data = data.setdefault(key, {})\n data[keys[-1]] = value", "def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict:\n if not keys:\n return d\n\n if len(keys) == 1:\n d[keys[0]] = value\n return d\n\n subd = d\n for key in keys[:-1]:\n if key not in subd:\n subd = subd.setdefault(key, {})\n else:\n subd = subd[key]\n\n subd[keys[-1]] = value\n return d", "def set_nested_item(data_dict: dict, key_list: tuple or list, value):\r\n reduce(getitem, key_list[:-1], data_dict)[key_list[-1]] = value\r\n return data_dict", "def _try_set(set_list, index, nested_dict, dict_keys=[]):\n try:\n for dict_key in dict_keys:\n nested_dict = nested_dict.__getitem__(dict_key)\n set_list[index] = str(nested_dict)\n return nested_dict\n except:\n return ''", "def test_deep_set_ordered_dicts(self):\n res = dictupdate.set_dict_key_value({}, \"A:B\", \"foo\", ordered_dict=True)\n self.assertEqual({\"A\": OrderedDict([(\"B\", \"foo\")])}, res)", "def set(cls, hierarchical_dict: dict, key: str, value: Any) -> None:\n # split according to '.'\n hierarchical_key = key.split(\".\")\n\n # go over the the dictionary according to the path, create the nodes that does not exist\n element = hierarchical_dict\n for key in hierarchical_key[:-1]:\n if key not in element:\n element[key] = {}\n element = element[key]\n\n # set the value\n element[hierarchical_key[-1]] = value", "def test_deep_set_overwrite(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"C:F\", \"foo\")\n self.assertEqual({\"A\": \"B\", \"C\": {\"D\": \"E\", \"F\": \"foo\"}}, res)\n # Verify modify-in-place\n self.assertEqual({\"A\": \"B\", \"C\": {\"D\": \"E\", \"F\": \"foo\"}}, mdict)\n\n # Test using alternative delimiter\n res = dictupdate.set_dict_key_value(\n mdict, \"C/F\", {\"G\": \"H\", \"I\": \"J\"}, delimiter=\"/\"\n )\n self.assertEqual(self.dict1, res)\n\n # Test without using a delimiter in the keys\n res = dictupdate.set_dict_key_value(mdict, \"C\", None)\n self.assertEqual({\"A\": \"B\", \"C\": None}, res)", "def deepset(self, key, val):\n if DEBUG:\n print(repr(self))\n if '.' 
in key:\n top, rest = key.split('.', 1)\n topd = self.setdefault(top, self.__class__())\n topd.deepset(rest, val)\n else:\n self[key] = val", "def recursive_update_cfg(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def set_dict(self, dic): # -> None:\n ...", "def visit_dict(self, sydict):\n self.current.update(sydict)", "def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def _set_dict_on_settings(self, data, settings):\n keys = data.get('keys', {})\n for key in keys:\n settings.set_value(key, keys[key])\n groups = data.get('groups', {})\n for group in groups:\n sub = settings.get_settings(group)\n self._set_dict_on_settings(groups[group], sub)", "def visit_dict(self, sydict):\n for key, value in sydict.items():\n child = type(value)(value.container_type)\n self.current[key] = child\n value.visit(SpineCopyVisitor(child))", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def set_many(self, block, update_dict):\n updated_dict = {}\n\n # Generate a new dict with the correct mappings.\n for (key, value) in update_dict.items():\n updated_dict[self._key(block, key)] = value\n\n self._kvs.set_many(updated_dict)", "def buildDict(self, dict):\n for x in dict:\n self.EntireSet.append(x)\n print self.EntireSet", "def __setChildDict(self, child):\n \n d = self[self._name]\n d[child.getName()] = child.getDict()", "def set_if_set(dct, key, value):\r\n if value is not UNSET:\r\n dct[key] = value", "def test_recursive_update():\n\n test = Status.update_dict({'generation': TEST_1_ATTRS_1},\n {'generation': TEST_1_ATTRS_2})\n\n assert test['generation']['run_id'] == TEST_1_ATTRS_1['run_id']\n assert test['generation']['job_status'] == TEST_1_ATTRS_2['job_status']", "def add_flat(dict_, key, elem):\r\n\r\n if isinstance(elem, dict):\r\n dict_.update(elem)\r\n else:\r\n dict_[key] = elem", "def set_field(key, obj, val):\n\n o = obj\n subkeys = key.split('.')\n\n for subkey in subkeys[:-1]:\n if subkey not in o:\n o[subkey] = {}\n\n o = o[subkey]\n\n o[subkeys[-1]] = val", "def test_deep_update(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.update_dict_key_value(\n mdict, \"C:F\", {\"foo\": \"bar\", \"qux\": \"quux\"}\n )\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\", \"foo\": \"bar\", \"qux\": \"quux\"}},\n },\n res,\n )\n\n # Test updating a non-existing subkey\n res = dictupdate.update_dict_key_value({}, \"foo:bar:baz\", {\"qux\": \"quux\"})\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": {\"qux\": \"quux\"}}}}, res)\n # Test updating a non-existing subkey, with a different delimiter\n res = dictupdate.update_dict_key_value(\n {}, \"foo bar 
baz\", {\"qux\": \"quux\"}, delimiter=\" \"\n )\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": {\"qux\": \"quux\"}}}}, res)", "def _update_dict(full_key, val, d):\n for vk, vv in val.items():\n # The key of value is not in d.\n # if vk not in d:\n # # Exit.\n # raise ValueError(\"{}.{} does not exist in options\".format(full_key, vk))\n # else: # The key of val is in d.\n if isinstance(vv, list): # The value of the key is list.\n d[vk] = np.array(vv) # Store it as a numpy array.\n elif isinstance(vv, dict): # The value of the key is dictionary.\n _update_dict(full_key + \".\" + vk, vv, d[vk]) # Call the function again.\n else: # At the leaf of the dictionary.\n d[vk] = vv", "def set(pb_or_dict, key, value):\n # pylint: disable=redefined-builtin,too-many-branches\n # redefined-builtin: We want 'set' to be part of the public interface.\n # too-many-branches: This method is inherently complex.\n\n # Sanity check: Is our target object valid?\n if not isinstance(pb_or_dict, (collections.MutableMapping, Message)):\n raise TypeError('Tried to set a key %s on an invalid object; '\n 'expected a dict or protobuf message.' % key)\n\n # We may be setting a nested key. Resolve this.\n key, subkey = _resolve_subkeys(key)\n\n # If a subkey exists, then get that object and call this method\n # recursively against it using the subkey.\n if subkey is not None:\n if isinstance(pb_or_dict, collections.MutableMapping):\n pb_or_dict.setdefault(key, {})\n set(get(pb_or_dict, key), subkey, value)\n return\n\n # Attempt to set the value on the types of objects we know how to deal\n # with.\n if isinstance(pb_or_dict, collections.MutableMapping):\n pb_or_dict[key] = value\n elif isinstance(value, (collections.MutableSequence, tuple)):\n # Clear the existing repeated protobuf message of any elements\n # currently inside it.\n while getattr(pb_or_dict, key):\n getattr(pb_or_dict, key).pop()\n\n # Write our new elements to the repeated field.\n for item in value:\n if isinstance(item, collections.Mapping):\n getattr(pb_or_dict, key).add(**item)\n else:\n getattr(pb_or_dict, key).extend([item])\n elif isinstance(value, collections.Mapping):\n # Assign the dictionary values to the protobuf message.\n for item_key, item_value in value.items():\n set(getattr(pb_or_dict, key), item_key, item_value)\n elif isinstance(value, Message):\n # Assign the protobuf message values to the protobuf message.\n for item_key, item_value in value.ListFields():\n set(getattr(pb_or_dict, key), item_key.name, item_value)\n else:\n setattr(pb_or_dict, key, value)", "def recursive_mapping_update(d, u):\n if u is not None:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = recursive_mapping_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def _update_loose (self, dict):\n self.__dict__.update(dict)", "def update_dict(new,old):", "def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(len(sub_ch), 2)\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual(c.get_content(), 'v1')\n else:\n self.assertEqual(c.get_content(), 'v2')", "def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": 
\"baz\"}}", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def setMyDictInfo(self, myDict):\n for key, value in myDict.iteritems():\n myObj = getattr(self, key)\n if type(myObj) == types.DictType:\n # this is a dict of objects\n for key2, value2 in myObj.iteritems():\n myChildObj = myObj[key2]\n try:\n myChildDict = myDict[key][key2]\n myChildObj.setMyDictInfo(myChildDict)\n except:\n pass\n if type(value) == types.DictType and len(myObj.keys()) == 0:\n setattr(self, key, value)\n else:\n setattr(self, key, value)", "def test_set_many(self):\r\n kv_dict = self.construct_kv_dict()\r\n\r\n self.kvs.set_many(kv_dict)\r\n for key in kv_dict:\r\n self.assertEquals(self.kvs.get(key), kv_dict[key])", "def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(2, len(sub_ch))\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual('v1', c.get_content())\n else:\n self.assertEqual('v2', c.get_content())", "def dict_form(self):\n dict_set = {\n \"element\": self.element,\n \"left\": self.left,\n \"right\": self.right,\n }\n return dict_set", "def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value", "def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")", "def update(self, new_values):\n values_copy = new_values.copy()\n for key in self.SET_KEYS:\n if key in values_copy:\n values_copy[key] = set(values_copy[key])\n super(ConfigDict, self).update(values_copy)", "def update(sn, d):\n if isinstance(sn, SN):\n sn = vars(sn)\n d = unwind_nested_dict(decode(d))\n for k, v in d.items():\n if k in sn and isinstance(v, (dict, SN)) and isinstance(sn[k], (dict, SN)):\n update(sn[k], v)\n else:\n sn[k] = encode(v)", "def update(d, u):\n\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def set(\n obj: Union[JSONArray, JSONObject], # pylint: disable=unsubscriptable-object\n key: Union[int, str, FlatKey], # pylint: disable=unsubscriptable-object\n value: JSONObject,\n):\n if isinstance(key, Iterable) and not isinstance(key, str):\n key = tuple(key)\n else:\n key = (key,)\n k = key[0]\n for next_key in key[1:]:\n if isinstance(next_key, int):\n next_obj = []\n elif isinstance(next_key, str):\n next_obj = {}\n else:\n raise JSONTypeError(\n type=Union[int, str],\n value=next_key,\n msg=f\"JSON key must have type {{type}}, got {type(next_key)}\",\n )\n if isinstance(obj, list) and k == len(obj):\n obj.append(next_obj)\n else:\n obj[k] = next_obj\n obj = next_obj\n k = next_key\n if isinstance(obj, list) and k == len(obj):\n obj.append(value)\n else:\n obj[k] = value", "def as_set(dict_inst):\n\n return frozenset(dict_inst.items()) if isinstance(dict_inst, dict) else None", "def make_set(node):\n node.parent = node\n node.rank = 0", "def 
set_upward(self, key, value):\n context = self.dicts[-1]\n for d in reversed(self.dicts):\n if key in d:\n context = d\n break\n context[key] = value", "def deep_merge(origin: dict, renovator: Mapping) -> dict:\n\n for key, value in renovator.items():\n if isinstance(value, Mapping):\n node = origin.setdefault(key, {})\n deep_merge(node, value)\n else:\n origin[key] = value\n\n return origin", "def test_set_dict_value_3(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"C\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"C\")", "def set():", "def test_add_nested_dict_value():\n\n recursiveKeys = [\n \"reward_signals.extrinsic.strength\",\n \"reward_signals.extrinsic.gamma\",\n \"reward_signals.curiosity.strength\",\n \"reward_signals.curiosity.gamma\",\n ]\n\n expectedDict = {\n 'reward_signals': {\n 'curiosity': {'gamma': 1.0, 'strength': 1.0},\n 'extrinsic': {'gamma': 1.0, 'strength': 1.0},\n }\n }\n\n dictionary = {}\n\n for key in recursiveKeys:\n common.add_nested_dict_value(dictionary, key, 1.0)\n\n assert dictionary == expectedDict\n\n dictionary = {'reward_signals': {'extrinsic': {}}}\n\n for key in recursiveKeys:\n common.add_nested_dict_value(dictionary, key, 1.0)\n\n assert dictionary == expectedDict\n\n dictionary = {'reward_signals': {'extrinsic': {'gamma': 0.99}}}\n\n for key in recursiveKeys:\n common.add_nested_dict_value(dictionary, key, 1.0)\n\n assert dictionary == expectedDict", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def _update_dicts(master_dict, update_dict):\n for key, value in update_dict.items():\n for subkey, subvalue in value.items():\n master_dict[key][subkey] = subvalue", "def set_many(self, update_dict):\n for key, value in update_dict.items():\n self.set(key, value)", "def update_double_dict(outer, inner):\n for k, v in outer.items():\n outer[k].update(inner[k])", "def _set_by_path(dic, keys, value, create_missing=True):\n d = dic\n i = 0\n n_key = len(keys) - 1\n while i < n_key:\n k = keys[i]\n if isinstance(k, int):\n assert isinstance(d, list), \"Internal Error: %s is Expected as a list for %s.\" % (d, k)\n\n while len(d) <= k:\n d.insert(k, {})\n d = d[k]\n elif k in d:\n d = d[k]\n elif create_missing:\n next_key = keys[i + 1]\n if isinstance(next_key, int):\n if isinstance(d, list):\n d.insert(k, [])\n else:\n d[k] = []\n else:\n d[k] = {}\n d = d[k]\n else:\n return dic\n i += 1\n\n if isinstance(d, list) and keys[-1] >= len(d):\n d.insert(keys[-1], value)\n else:\n d[keys[-1]] = value\n return dic", "def recursive_update(d1, d2):\n\n for key, value in d2.items():\n if key in d1 and isinstance(d1[key], dict) and isinstance(value, dict):\n recursive_update(d1[key], value)\n else:\n d1[key] = value", "def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def recursive_update(\n base_dict: typing.Dict[typing.Any, typing.Any],\n new_dict: typing.Mapping[typing.Any, typing.Any],\n ) -> None:\n for key, value in new_dict.items():\n if isinstance(value, collections.Mapping) and (\n base_dict.get(key) is not None\n ):\n 
TrainingConfig.recursive_update(base_dict[key], value)\n else:\n base_dict[key] = value", "def update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "def __setattr__(self, key, value):\n if isinstance(value, DotDict) and key != '_parent':\n value.__dict__['_parent'] = weakref.proxy(self)\n super(DotDictWithAcquisition, self).__setattr__(key, value)", "def recursiveSearchReplace(x, s, r):\n for k, v in x.items():\n if type(v) is dict:\n recursiveSearchReplace(v, s, r)\n else:\n if v == s:\n x[k] = r", "def dictsub(subset,superset):\n\treturn all(item in superset.items() for item in subset.items())", "def _update_careful (self, dict):\n for attr in dir(self):\n if dict.has_key(attr):\n dval = dict[attr]\n if dval is not None:\n setattr(self, attr, dval)", "def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}", "def test_set_dict_value_1(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"A\")", "def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]", "def _update(self, db_item, update_dict, unset=None, pull=None, push=None, push_list=None, pull_list=None):\n def _iterate_keys(k, db_nested, populate=True):\n k_list = k.split(\".\")\n k_item_prev = k_list[0]\n populated = False\n if k_item_prev not in db_nested and populate:\n populated = True\n db_nested[k_item_prev] = None\n for k_item in k_list[1:]:\n if isinstance(db_nested[k_item_prev], dict):\n if k_item not in db_nested[k_item_prev]:\n if not populate:\n raise DbException(\"Cannot set '{}', not existing '{}'\".format(k, k_item))\n populated = True\n db_nested[k_item_prev][k_item] = None\n elif isinstance(db_nested[k_item_prev], list) and k_item.isdigit():\n # extend list with Nones if index greater than list\n k_item = int(k_item)\n if k_item >= len(db_nested[k_item_prev]):\n if not populate:\n raise DbException(\"Cannot set '{}', index too large '{}'\".format(k, k_item))\n populated = True\n db_nested[k_item_prev] += [None] * (k_item - len(db_nested[k_item_prev]) + 1)\n elif db_nested[k_item_prev] is None:\n if not populate:\n raise DbException(\"Cannot set '{}', not existing '{}'\".format(k, k_item))\n populated = True\n db_nested[k_item_prev] = {k_item: None}\n else: # number, string, boolean, ... 
or list but with not integer key\n raise DbException(\"Cannot set '{}' on existing '{}={}'\".format(k, k_item_prev,\n db_nested[k_item_prev]))\n db_nested = db_nested[k_item_prev]\n k_item_prev = k_item\n return db_nested, k_item_prev, populated\n\n updated = False\n try:\n if update_dict:\n for dot_k, v in update_dict.items():\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item)\n dict_to_update[key_to_update] = v\n updated = True\n if unset:\n for dot_k in unset:\n try:\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item, populate=False)\n del dict_to_update[key_to_update]\n updated = True\n except Exception:\n pass\n if pull:\n for dot_k, v in pull.items():\n try:\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item, populate=False)\n except Exception:\n continue\n if key_to_update not in dict_to_update:\n continue\n if not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot pull '{}'. Target is not a list\".format(dot_k))\n while v in dict_to_update[key_to_update]:\n dict_to_update[key_to_update].remove(v)\n updated = True\n if pull_list:\n for dot_k, v in pull_list.items():\n if not isinstance(v, list):\n raise DbException(\"Invalid content at pull_list, '{}' must be an array\".format(dot_k),\n http_code=HTTPStatus.BAD_REQUEST)\n try:\n dict_to_update, key_to_update, _ = _iterate_keys(dot_k, db_item, populate=False)\n except Exception:\n continue\n if key_to_update not in dict_to_update:\n continue\n if not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot pull_list '{}'. Target is not a list\".format(dot_k))\n for single_v in v:\n while single_v in dict_to_update[key_to_update]:\n dict_to_update[key_to_update].remove(single_v)\n updated = True\n if push:\n for dot_k, v in push.items():\n dict_to_update, key_to_update, populated = _iterate_keys(dot_k, db_item)\n if isinstance(dict_to_update, dict) and key_to_update not in dict_to_update:\n dict_to_update[key_to_update] = [v]\n updated = True\n elif populated and dict_to_update[key_to_update] is None:\n dict_to_update[key_to_update] = [v]\n updated = True\n elif not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot push '{}'. Target is not a list\".format(dot_k))\n else:\n dict_to_update[key_to_update].append(v)\n updated = True\n if push_list:\n for dot_k, v in push_list.items():\n if not isinstance(v, list):\n raise DbException(\"Invalid content at push_list, '{}' must be an array\".format(dot_k),\n http_code=HTTPStatus.BAD_REQUEST)\n dict_to_update, key_to_update, populated = _iterate_keys(dot_k, db_item)\n if isinstance(dict_to_update, dict) and key_to_update not in dict_to_update:\n dict_to_update[key_to_update] = v.copy()\n updated = True\n elif populated and dict_to_update[key_to_update] is None:\n dict_to_update[key_to_update] = v.copy()\n updated = True\n elif not isinstance(dict_to_update[key_to_update], list):\n raise DbException(\"Cannot push '{}'. 
Target is not a list\".format(dot_k),\n http_code=HTTPStatus.CONFLICT)\n else:\n dict_to_update[key_to_update] += v\n updated = True\n\n return updated\n except DbException:\n raise\n except Exception as e: # TODO refine\n raise DbException(str(e))", "def pdict(self, *args, **kwargs):\n if self is self.__root__:\n try:\n if self.all(lambda x: len(x) == 2):\n return pdict({k: v for k, v in self}).update(*args, **kwargs)\n except Exception:\n pass\n try:\n return pdict({k: v for k, v in zip(self.__root__.key, self)}).update(*args, **kwargs)\n except Exception:\n pass\n return pdict({k: v for k, v in zip(self.__root__, self)}).update(*args, **kwargs)", "def dubk(d, x, y, z):\n try:\n d[x][y]['list'].append(z)\n except KeyError as e:\n d[x][y] = {'list': [z]}", "def update(self, d):\n for k in d:\n self[k] = d[k]", "def set_dict_attrs(d, values):\n key = values.keys()[0]\n key_parts = key.split('.')\n if len(key_parts) > 1:\n if key_parts[:1][0] in d.keys():\n d[key_parts[:1][0]] = set_dict_attrs(d[key_parts[:1][0]],\n {'.'.join(key_parts[1:]): values.values()[0]})\n else:\n d[key_parts[:1][0]] = set_dict_attrs({}, {'.'.join(key_parts[1:]): values.values()[0]})\n else:\n d[key_parts[:1][0]] = values.values()[0]\n return d", "def set_from_json(raw: Dict, node):\n node.u = [\n np.array(raw[\"u{0}\".format(i)]) for i in range(len(node.u))\n ]\n node.observed = raw[\"observed\"]\n node.phi = [\n np.array(raw[\"phi{0}\".format(i)]) for i in range(len(node.phi))\n ]\n node.f = np.array(raw[\"f\"])\n node.g = np.array(raw[\"g\"])\n return node", "def set(self, key, value):\r\n self.set_many({key: value})", "def test_updatewithdictionarybycomparingdictionaries(self):\n s1 = Square(10, 2, 1, 9)\n s1_dictionary = s1.to_dictionary()\n s2 = Square(1, 1)\n s2.update(**s1_dictionary)\n self.assertEqual(s1.__dict__, s2.__dict__)", "def set(self, key, value):", "def set(self, key, value):", "def update(self, dict=None, **kwargs):\n data = {}\n if dict:\n data.update(dict, **kwargs)\n else:\n data.update(**kwargs)\n self.multi_set(data)", "def set_data(self,key='',val=None):\n parent_itm = self._root\n if '.' 
in key:\n parent_itm = self.get_data(self.parent_key(key))\n itm_key = key.split('.')[-1]\n if itm_key:\n try: \n parent_itm[itm_key] = val\n except:\n try: \n parent_itm[int(itm_key)] = val # list case\n except:\n parent_itm.append(val) # append to list case", "def update(self, other_dict):\n if isinstance(other_dict, MultiDict):\n for key, value_list in other_dict.lists():\n self.setlistdefault(key, []).extend(value_list)\n else:\n for key, value in other_dict.items():\n self.setlistdefault(key, []).append(value)", "def test_deep_extend(self):\n sdict = {\"bar\": {\"baz\": [1, 2]}}\n res = dictupdate.extend_dict_key_value(sdict, \"bar:baz\", [42, 42])\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 42]}}, res)\n\n # Extend a not-yet existing list\n res = dictupdate.extend_dict_key_value({}, \"bar:baz:qux\", [42])\n self.assertEqual({\"bar\": {\"baz\": {\"qux\": [42]}}}, res)\n\n # Extend with a dict (remember, foo has been updated in the first test)\n res = dictupdate.extend_dict_key_value(sdict, \"bar:baz\", {\"qux\": \"quux\"})\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 42, \"qux\"]}}, res)", "def add_nested_dependencies(dependency_map: Dict[str, Set[str]]) -> Dict[str, Set[str]]:\n # path can be at most as long as the total number of items\n for _ in range(len(dependency_map)):\n for dependencies in dependency_map.values():\n for dependent_key in dependencies.copy():\n dependencies.update(dependency_map[dependent_key])", "def set_by_path(data: Dict[str, T], path: Sequence[str], value: T):\n get_by_path(data, path[:-1])[path[-1]] = value", "def _set(self, ikey, value):\n obj = self\n keys = ikey.split('.')\n for idx in range(0, len(keys)):\n key = keys[idx]\n if not obj.has_key(key):\n ckey = '.'.join(keys[idx:])\n nkey, nval = convert_dot_notation(ckey, value)\n if isinstance(obj, DotDict):\n super(DotDict, obj).__setitem__(nkey, nval)\n else:\n obj.__setitem__(nkey, nval)\n return\n if key != keys[-1]:\n try:\n obj = super(DotDict, obj).__getitem__(key)\n except:\n try:\n obj = obj[key]\n except:\n raise\n if not isinstance(obj, dict):\n msg = 'Cannot assign new value, internal obj is not dict'\n raise Exception(msg)\n if isinstance(obj, DotDict):\n super(DotDict, obj).__setitem__(key, value)\n else:\n obj.__setitem__(key, value)", "def test_dictfield_update(self):\n\n class Club(Document):\n members = DictField()\n\n club = Club()\n club.members[\"John\"] = {\"gender\": \"M\", \"age\": 13}\n club.save()\n\n Club.objects().update(set__members={\"John\": {\"gender\": \"F\", \"age\": 14}})\n\n club = Club.objects().first()\n assert club.members[\"John\"][\"gender\"] == \"F\"\n assert club.members[\"John\"][\"age\"] == 14", "def __setitem__(self, key, value):\n self.tree[key] = value", "def set(self, key: t.Hashable, value: t.Any) -> None:", "def test_updatewithdict(self):\n s1 = Square(10, 2, 1)\n s1_dictionary = s1.to_dictionary()\n s2 = Square(1, 1)\n s2.update(**s1_dictionary)\n self.assertEqual(s2.size, 10)\n self.assertEqual(s2.x, 2)\n self.assertEqual(s2.y, 1)", "def test_single_level(self):\n dict_1 = {\n 'key_1': 'original_value_1',\n 'key_2': 'original_value_2'\n }\n dict_2 = {\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3'\n }\n\n result = deep_dict_merge(dict_1, dict_2)\n\n assert dict_1 == {\n 'key_1': 'original_value_1',\n 'key_2': 'original_value_2'\n }\n assert dict_2 == {\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3'\n }\n assert result == {\n 'key_1': 'original_value_1',\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3',\n }", "def visit_record(self, 
syrecord):\n for other_key, other_value in syrecord.items():\n try:\n getattr(self.current, other_key).update(other_value)\n except KeyError:\n setattr(self.current, other_key, other_value)", "def deepupdate(target, src, overwrite=True):\n for k, v in src.items():\n if type(v) == list:\n if k not in target:\n target[k] = copy.deepcopy(v)\n elif overwrite is True:\n target[k].extend(v)\n elif type(v) == dict:\n if k not in target:\n target[k] = copy.deepcopy(v)\n else:\n deepupdate(target[k], v, overwrite=overwrite)\n elif type(v) == set:\n if k not in target:\n target[k] = v.copy()\n elif overwrite is True:\n if type(target[k]) == list:\n target[k].extend(v)\n elif type(target[k]) == set:\n target[k].update(v)\n else:\n raise TypeError(\"Cannot update {} with {}\".format(\n type(target[k]),\n type(v))\n )\n else:\n if k not in target or overwrite is True:\n target[k] = copy.copy(v)", "def set_by_dict(self, dictionary):\n assert 'numerator' in dictionary and 'denominator' in dictionary\n self.numerator = int(dictionary['numerator'])\n self.denominator = int(dictionary['denominator'])\n return True", "def set_key(self, key, value):\n if '.' in key:\n key, remainder = key.split('.', 1)\n try:\n self[key].set_key(remainder, value)\n except KeyError:\n self[key] = AttrDict()\n self[key].set_key(remainder, value)\n except AttributeError:\n if self[key] is None: # If the value is None, we replace it\n self[key] = AttrDict()\n self[key].set_key(remainder, value)\n # Else there is probably something there, and we don't just\n # want to overwrite so stop and warn the user\n else:\n raise KeyError('Cannot set nested key on non-dict key.')\n else:\n self[key] = value", "def set_deep(config, key_seq, new_val):\n if 1 == len(key_seq):\n config[key_seq[0]] = new_val\n else:\n set_deep(config[key_seq[0]], key_seq[1:], new_val)", "def _update(value: Dict[str, Any], update: Dict[str, Any]):\n for key, val in update.items():\n\n if key not in value:\n value[key] = val\n elif isinstance(val, dict):\n value[key] = _update(value[key], val)\n else:\n value[key] = val\n return value", "def delveset(o,*k,**kwargs):\n\tvalue = kwargs.pop('value',None)\n\tif value==None: raise Exception('delveset needs a value')\n\tif kwargs: raise Exception('unprocessed kwargs %s'%str(kwargs))\n\tif len(k)==0: raise Exception('deepset needs a path')\n\telif len(k)==1: \n\t\ttry: o[k[0]] = value\n\t\texcept:\n\t\t\timport pdb;pdb.set_trace()\n\telse:\n\t\tif k[0] not in o: o[k[0]] = {}\n\t\tdelveset(o[k[0]],*k[1:],value=value)", "def _set_by_path(tree, keys, value):\n _get_by_path(tree, keys[:-1])[keys[-1]] = value", "def _set_by_path(tree, keys, value):\n _get_by_path(tree, keys[:-1])[keys[-1]] = value", "def updateDict(self,strSet):\n\tself.createAdjList(strSet,\"remove\")" ]
[ "0.71018374", "0.70984405", "0.704741", "0.6844214", "0.64120656", "0.63191766", "0.62922853", "0.6158688", "0.61120975", "0.611104", "0.5974053", "0.5923545", "0.58683455", "0.57761884", "0.57549286", "0.5738851", "0.5720999", "0.5719014", "0.5687486", "0.5683523", "0.56826097", "0.5681803", "0.5677057", "0.56662995", "0.5653971", "0.56539315", "0.56411994", "0.5636299", "0.560673", "0.558012", "0.55749106", "0.55721855", "0.5565189", "0.55575675", "0.5540879", "0.55254906", "0.55246824", "0.5514271", "0.55046993", "0.5500304", "0.54702616", "0.5463096", "0.5442991", "0.54335654", "0.5422272", "0.54219836", "0.54173374", "0.5413152", "0.54102767", "0.540925", "0.5398759", "0.5398624", "0.5397795", "0.5392536", "0.5390717", "0.53888404", "0.53876495", "0.5370708", "0.53613317", "0.5360135", "0.53595644", "0.5352956", "0.53500974", "0.5344505", "0.5343277", "0.5335607", "0.53317565", "0.5329813", "0.5317059", "0.5302964", "0.53001595", "0.5300043", "0.5297675", "0.52964336", "0.52952623", "0.52940184", "0.5289124", "0.5289124", "0.527475", "0.5254179", "0.52493435", "0.5241965", "0.5231845", "0.5231058", "0.52234465", "0.5216147", "0.5200614", "0.51991576", "0.5181896", "0.5181486", "0.51743215", "0.51662076", "0.51654696", "0.5154922", "0.5129157", "0.5126658", "0.5121238", "0.51205236", "0.51205236", "0.51187354" ]
0.5194484
88
Flattens nested dicts into a single level dict.
def dict_path(my_dict, path=None): if path is None: path = "" for k, v in my_dict.items(): newpath = path + ("." if path != "" else "") + k if isinstance(v, dict): for u in dict_path(v, newpath): yield u else: yield newpath, v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flatten_dict(d):\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in flatten_dict(value).items():\n yield subkey, subvalue\n else:\n yield key, value\n\n return dict(items())", "def flatten_dict(nested):\n flattened = {}\n for key, value in nested.items():\n if isinstance(value, Mapping):\n for subkey, subval in value.items():\n newkey = '.'.join([key, subkey])\n flattened[newkey] = subval\n flatten_dict(flattened)\n else:\n flattened[key] = value\n mappings = [isinstance(value, Mapping) for key, value in flattened.items()]\n if len(set(mappings)) == 1 and set(mappings).pop() is False:\n return flattened\n else:\n return flatten_dict(flattened)", "def _flatten_dict(x: Dict) ->Dict:\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return new_dict", "def flatten_dict(d):\n\n def expand(key, value):\n if isinstance(value, dict):\n return [(key + '.' + k, v) for k, v in flatten_dict(value).items()]\n else:\n return [(key, value)]\n\n items = [item for k, v in d.items() for item in expand(k, v)]\n return dict(items)", "def flatten_dict(nested, prefix=''):\n result = dict()\n\n for key, value in nested.items():\n prefix_key = '__'.join([prefix, str(key)]) if len(prefix) else key\n\n if key in IGNORED_DICT_KEYS and not isinstance(value, (dict, list)):\n continue\n\n elif isinstance(value, dict):\n result.update(flatten_dict(value, prefix_key))\n\n elif isinstance(value, (np.ndarray, list)):\n result.update(flatten_array(value, prefix_key))\n\n else:\n result[prefix_key] = value\n\n return result", "def flatten(self):\n flat = {}\n for d in self.dicts:\n flat.update(d)\n return flat", "def unflatten(\n d: Dict[str, Any],\n base: Dict[str, Any] = None,\n) -> Dict[str, Any]:\n if base is None:\n base = {}\n\n for key, value in d.items():\n root = base\n\n ###\n # If a dotted path is encountered, create nested dicts for all but\n # the last level, then change root to that last level, and key to\n # the final key in the path. This allows one final setitem at the bottom\n # of the loop.\n if '.' 
in key:\n *parts, key = key.split('.')\n\n for part in parts:\n root.setdefault(part, {})\n root = root[part]\n\n if isinstance(value, dict):\n value = unflatten(value, root.get(key, {}))\n\n root[key] = value\n\n return base", "def flatten(d):\n\n c = {}\n\n def _flatten(parents, items):\n for k, v in items:\n cur = parents + [k]\n if isinstance(v, list):\n _flatten(cur, enumerate(v))\n elif isinstance(v, dict):\n _flatten(cur, v.items())\n else:\n if v is None:\n cur.append('$NULL')\n v = ''\n name = str(cur[0]) + ''.join(['['+str(x)+']' for x in cur[1:]])\n c[name] = v\n \n _flatten([], d.items())\n\n return c", "def flatten_nested_dict(prefix, nested_dict):\n\n cleaned_nested_dict = {}\n cleaned_nested_dict = {\n f'{prefix}_{key}': val for key, val in nested_dict.items()}\n\n return cleaned_nested_dict", "def _flatten_dict(self, d: Mapping[str, Any]) -> Dict[str, Any]:\n nested = {k for k, v in d.items() if isinstance(v, (Mapping, Configuration))}\n if self._lowercase:\n result = {\n k.lower() + \".\" + ki: vi\n for k in nested\n for ki, vi in self._flatten_dict(d[k]).items()\n }\n result.update(\n (k.lower(), v)\n for k, v in d.items()\n if not isinstance(v, (Mapping, Configuration))\n )\n else:\n result = {\n k + \".\" + ki: vi\n for k in nested\n for ki, vi in self._flatten_dict(d[k]).items()\n }\n result.update(\n (k, v)\n for k, v in d.items()\n if not isinstance(v, (Mapping, Configuration))\n )\n return result", "def flatten_dict(d, parent_key=\"\", sep=\"_\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def flatten(d: Union[dict, list], parent_key: str = \"\", sep: str = \".\") -> dict:\n items = []\n if isinstance(d, dict):\n for k, v in d.items():\n new_key = f\"{parent_key}{sep}{k}\" if parent_key else str(k)\n items.extend(flatten(v, new_key, sep=sep).items())\n elif isinstance(d, list):\n for i, elem in enumerate(d):\n new_key = f\"{parent_key}{sep}{i}\" if parent_key else str(i)\n items.extend(flatten(elem, new_key, sep).items())\n else:\n items.append((parent_key, d))\n return dict(items)", "def flatten_values(d):\n if isinstance(d, dict):\n for v in d.values():\n if isinstance(v, dict):\n yield from flatten_values(v)\n else:\n yield v\n else:\n yield d", "def flatten_dict(d, separator=':', _parent_key=''):\n items = []\n for k, v in d.items():\n new_key = _parent_key + separator + k if _parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, separator=separator, _parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def unflatten_dict(flat):\n unflattened = dict()\n\n for key, value in sorted(flat.items(), key=_key_order):\n if '__' in key:\n key, subkey = key.split('__', 1)\n subkey, name = subkey.rsplit('__', 1)\n\n if name.isdigit():\n column_index = int(name)\n row_index = int(subkey)\n\n array = unflattened.setdefault(key, list())\n\n if len(array) == row_index:\n row = list()\n array.append(row)\n elif len(array) == row_index + 1:\n row = array[row_index]\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n if len(row) == column_index:\n row.append(value)\n else:\n # This should never happen\n raise ValueError('There was an error unflattening the extension.')\n\n else:\n subdict = unflattened.setdefault(key, dict())\n if 
subkey.isdigit():\n subkey = int(subkey)\n\n inner = subdict.setdefault(subkey, dict())\n inner[name] = value\n\n else:\n unflattened[key] = value\n\n return unflattened", "def expand_flattened_dict(flattened, separator='.'):\n merged = {}\n for key, value in flattened.items():\n expanded = expand_flattened_path(key, value=value, separator=separator)\n merged = merge_dicts(merged, expanded)\n return merged", "def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def flatten_dict(d, sep=' ', parent_key=''):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def _flatten_dict(self, current, key, result):\n if isinstance(current, dict):\n for k in current:\n new_key = \"{1}\".format(key, k) if len(key) > 0 else k\n self._flatten_dict(current[k], new_key, result)\n else:\n result[key] = current\n return result", "def flatten_dict(dict_input):\n flattened_dict = dict()\n\n for key, value in dict_input.items():\n if isinstance(value, dict):\n new_keys = sorted(value.keys())\n for new_key in new_keys:\n entry = {key + '_' + new_key: value[new_key]}\n flattened_dict.update(entry)\n else:\n entry = {key: value}\n flattened_dict.update(entry)\n\n return flattened_dict", "def flatten(d: MutableMapping, sep: str = \".\", parent_key: str = \"\") -> dict:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, sep=sep, parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "def _flatten_dictionary(self, params, parent=None):\r\n data = OrderedDict()\r\n for key, val in params.items():\r\n full_key = parent + \"[\" + key + \"]\" if parent else key\r\n if isinstance(val, dict):\r\n data.update(self._flatten_dictionary(val, full_key))\r\n else:\r\n data[full_key] = val\r\n return data", "def flatten_dict(d, target='key' or 'value'):\n def format_nested(nested, _key=None):\n if _key is None:\n _key = key\n return '{}_{}'.format(_key, nested)\n\n flattened = []\n target_is_key = target == 'key'\n for key, value in OrderedDict(sorted(d.items())).items():\n\n # Simple case: recursively flatten the dictionary.\n if isinstance(value, dict):\n flattened += map(\n format_nested if target_is_key else lambda x: x,\n flatten_dict(value, target=target)\n )\n\n # We are suddenly in muddy waters, because lists can have multiple types within them in JSON.\n elif isinstance(value, list):\n items_are_dict = [isinstance(item, dict) for item in value]\n items_are_list = [isinstance(item, list) for item in value]\n\n # To help reduce the complexity here, let's not support this case.\n # Besides, most sensible APIs won't bump into this case.\n if any(items_are_dict) and not all(items_are_dict):\n raise NotImplementedError(\"Ability to flatten dict with list of mixed dict and non-dict types \"\n \"is not currently supported\")\n\n # Same here, this is just weird.\n if any(items_are_list):\n raise NotImplementedError(\"Ability to flatten a dict with lists within lists \"\n \"is not currently supported. 
And we'd like to ask you to take it easy.\")\n\n # This case is common, but a little complex.\n elif all(items_are_dict):\n for index, item in enumerate(value):\n _flattened_dict = flatten_dict(item, target=target)\n\n # In this case we actually want to prepend the dict's index in the list to each flattened dict.\n if target_is_key:\n _flattened_dict = [format_nested(flattened_item, _key=index)\n for flattened_item in _flattened_dict]\n\n flattened += map(format_nested if target_is_key else lambda x: x, _flattened_dict)\n\n # All items are non-dict, so just directly add either the index or the value.\n else:\n flattened += map(format_nested, range(len(value))) if target_is_key else value\n\n # Kindergarten -- just add to the list.\n else:\n flattened.append(key if target_is_key else value)\n return flattened", "def FlattenDictionary(value, keys=[]):\n result = {}\n if type(value) is dict:\n for key in value:\n result.update(FlattenDictionary(value[key], keys + [key]))\n return result\n else:\n key = '.'.join(keys)\n return {key: value}", "def flatten_dict(dictionary,keys=None):\n flat_dict = {}\n for k,v in dictionary.items():\n if not keys or k in keys:\n if isinstance(v,dict):\n r = flatten_dict(v)\n flat_dict.update(r)\n else:\n flat_dict[k] = v\n return flat_dict", "def flatten_json(json_object):\n flat_json = {}\n # Check that either an element is already flat, or flatten it\n for key in json_object.keys():\n # if the element is not flat, flatten it recursively\n if isinstance(json_object[key], dict):\n flat_sub_json = flatten_json(json_object[key])\n # add each newly flattened value to the result json\n for sub_key in flat_sub_json.keys():\n flat_json[key + \".\" + sub_key] = flat_sub_json[sub_key]\n # if the element is flat, add it to the result\n else:\n flat_json[key] = json_object[key]\n return flat_json", "def flat_dict(d):\n nd = {}\n for (key, value) in d.items():\n nd[key] = value.pop()\n\n return nd", "def recursive_squeeze(dictlike):\n out = {}\n for k, v in dictlike.items():\n if isinstance(v, dict):\n out[k] = recursive_squeeze(v)\n else:\n out[k] = np.squeeze(v)\n return out", "def flatten(parsed):\r\n parsedList = [parsed] if isinstance(parsed, dict) else parsed\r\n result = []\r\n for dico in parsedList:\r\n #Sort keys between actual values and nested dicts\r\n listKeys = []\r\n standardKeys = []\r\n for key in dico:\r\n if isinstance(dico[key], list):\r\n listKeys.append(key)\r\n else:\r\n standardKeys.append(key)\r\n if not listKeys:\r\n #Terminal condition: no nested dict\r\n result.append(dico)\r\n else:\r\n partialResult = [{x:dico[x] for x in standardKeys}]\r\n for key in listKeys:\r\n #Create a dict with the keys from partialResult and\r\n #from the nested dicts\r\n recurs = Bricks.flatten(dico[key])\r\n partialResult = [{**x, **y} for x in partialResult for y in recurs]\r\n result.extend(partialResult)\r\n return result", "def flatten_dictionaries(input):\n output = dict()\n if isinstance(input, list):\n for map in input:\n if not isinstance(map, dict):\n raise Exception('Tried to flatten a list of NON-dictionaries into a single dictionary. 
Whoops!')\n for key in map.keys(): #Add keys into output\n output[key]=map[key]\n else: #Not a list of dictionaries\n output = input;\n return output", "def flatten_dict(d):\n l = []\n for k, v in sorted(d.items()):\n if isinstance(v, dict):\n flatten_v = flatten_dict(v)\n for my_l in reversed(flatten_v):\n my_l.insert(0, k)\n\n l.extend(flatten_v)\n\n elif isinstance(v, list):\n for l_val in v:\n l.append([k, l_val])\n\n else:\n l.append([k, v])\n\n return l", "def flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n items = dict(items)\n # remove info like PCA primitive ID\n items_not_strings = {k: v for k, v in items.items() if type(v) != str}\n return dict(items_not_strings)", "def flatten(nested_dict, separator=\"_\", root_keys_to_ignore=set()):\n assert isinstance(nested_dict, dict), \"flatten requires a dictionary input\"\n assert isinstance(separator, six.string_types), \"separator must be string\"\n\n # This global dictionary stores the flattened keys and values and is\n # ultimately returned\n flattened_dict = dict()\n\n def _flatten(object_, key):\n \"\"\"\n For dict, list and set objects_ calls itself on the elements and for\n other types assigns the object_ to\n the corresponding key in the global flattened_dict\n :param object_: object to flatten\n :param key: carries the concatenated key for the object_\n :return: None\n \"\"\"\n # Empty object can't be iterated, take as is\n if not object_:\n flattened_dict[key] = object_\n # These object types support iteration\n elif isinstance(object_, dict):\n for object_key in object_:\n if not (not key and object_key in root_keys_to_ignore):\n _flatten(object_[object_key], _construct_key(key,\n separator,\n object_key))\n elif isinstance(object_, (list, set, tuple)):\n for index, item in enumerate(object_):\n _flatten(item, _construct_key(key, separator, index))\n # Anything left take as is\n else:\n flattened_dict[key] = object_\n\n _flatten(nested_dict, None)\n return flattened_dict\n\n ###############################################################################################", "def flatten(d, path):\n\n if isinstance(d, dict):\n for k, v in d.items():\n yield from flatten(v, path + [k])\n else:\n yield (\".\".join(path), d)", "def nested_to_flat(self, data: dict, target: str, **kwargs):\n data.update(data.pop(target, {}))\n return data", "def flatten_dict(\n d, parent_key=\"\", sep=\".\", ignore_under_prefixed=True, mark_value=True\n):\n items = {}\n for k in d:\n if ignore_under_prefixed and k.startswith(\"__\"):\n continue\n v = d[k]\n if mark_value and k.startswith(\"_\") and not k.startswith(\"__\"):\n v = MarkValue(repr(v))\n\n new_key = sep.join((parent_key, k)) if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.update(\n flatten_dict(\n v, new_key, sep=sep, ignore_under_prefixed=True, mark_value=True\n )\n )\n else:\n items[new_key] = v\n\n return items", "def flatten(dictionary, sep=\".\"):\n\n def _flatten(dictionary):\n if dictionary == {}:\n return dictionary\n\n key, value = dictionary.popitem()\n if not isinstance(value, dict) or not value:\n new_dictionary = {key: value}\n new_dictionary.update(flatten(dictionary, sep=sep))\n return new_dictionary\n\n flat_sub_dictionary = flatten(value, sep=sep)\n for flat_sub_key in list(flat_sub_dictionary.keys()):\n flat_key = key + sep + 
flat_sub_key\n flat_sub_dictionary[flat_key] = flat_sub_dictionary.pop(flat_sub_key)\n\n new_dictionary = flat_sub_dictionary\n new_dictionary.update(flatten(dictionary, sep=sep))\n return new_dictionary\n\n return _flatten(copy.deepcopy(dictionary))", "def pivot_nested_dict(nested_dict):\n # declare res as the return object which should be a dict\n res = dict()\n # traverse the pollsters\n for pollster in nested_dict:\n \t# travserse the states\n \tfor state in nested_dict[pollster]:\n \t\t# if first meet a state, we need to create a new dict\n \t\tif state not in res:\n \t\t\tres[state] = dict()\n \t\t# put the pollster value in the state dict\n \t\tres[state][pollster] = nested_dict[pollster][state]\n return res", "def _flatten(flat_json: FlatJSON, keys: FlatKey, obj: JSONType) -> FlatJSON:\n jtype = typeof(obj)\n if jtype is JSONValue:\n flat_json[keys] = obj\n elif jtype is JSONArray:\n for index, value in enumerate(obj):\n _flatten(flat_json, (*keys, index), value)\n elif jtype is JSONObject:\n for key, value in obj.items():\n _flatten(flat_json, (*keys, key), value)\n else:\n raise PheresInternalError(f\"Unhandled json type {jtype} in flatten()\")\n return flat_json", "def nested_dict():\n return defaultdict(nested_dict)", "def nest_dict(dct, keys):\n nested_dict = dct\n for key in reversed(keys):\n nested_dict = RecursiveDict({key: nested_dict})\n return nested_dict", "def simplify_dict(d: Dict[str, Any]) -> Dict[str, Any]:\n return {\n k: [ast_to_testing_string(n) for n in v] if k == \"children\" else v\n for k, v in d.items()\n }", "def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]", "def pivot_nested_dict(nested_dict):\r\n\r\n reverse_nest_dict = {} #Create an empty dictionary\r\n for k, v in nested_dict.items(): #Iterate through each pair of elements\r\n for k2, v2 in v.items(): #Iterate through pair of values\r\n try:\r\n reverse_nest_dict[k2][k] = v2\r\n except KeyError:\r\n reverse_nest_dict[k2] = { k : v2 }\r\n return reverse_nest_dict\r\n \r\n #Create a dictionary that produces a different nested dictionary which\r\n #contains the same values\r", "def _flatten_lists(\n data: Union[Dict[str, Any], List[Any], Any]\n ) -> Union[Dict[str, Any], Any]:\n if not isinstance(data, dict):\n return data\n copy_data = cast(Dict[str, Any], data.copy())\n for attr, val in copy_data.items():\n if isinstance(val, list):\n if len(cast(List[Any], val)) == 1:\n # pull single values out of lists\n data[attr] = _flatten_lists(cast(Any, val[0]))\n else:\n data[attr] = [_flatten_lists(v) for v in cast(List[Any], val)]\n elif isinstance(val, dict):\n data[attr] = _flatten_lists(cast(Dict[str, Any], val))\n return data", "def to_dict(self):\r\n new_dict = {}\r\n for key, val in self.items():\r\n if isinstance(val, NestedDict):\r\n new_dict[key] = val.to_dict()\r\n else:\r\n new_dict[key] = val\r\n return new_dict", "def flatten_dict(base, v, d):\n if base != '':\n base = base + \".\"\n for k in d:\n if type(d[k]) in (type('a'), type(u'a'), type(1), type(1.1), type(False), type(None)):\n v[base + k] = d[k]\n elif type(d[k]) in (type([]), type((1,2))):\n v[base + k] = \", \".join(d[k])\n elif type(d[k]) == type({}):\n flatten_dict(base + k, v, d[k])\n else:\n print \"huh,\", type(d[k])", "def deep_merge(d, u):\n stack = [(d, u)]\n while stack:\n d, u = stack.pop(0)\n for k, v in u.items():\n if not 
isinstance(v, collections.abc.Mapping):\n d[k] = v\n else:\n dv = d.setdefault(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n else:\n stack.append((dv, v))", "def flatten_entrypoints(ep):\n def _flatten(d, prefix):\n d1 = {}\n for k, v in d.items():\n if isinstance(v, dict):\n yield from _flatten(v, prefix+'.'+k)\n else:\n d1[k] = v\n\n if d1:\n yield prefix, d1\n\n res = {}\n for k, v in ep.items():\n res.update(_flatten(v, k))\n return res", "def flatten(data, delim='_'):\n result = {}\n\n def flatten_dict(keys, name=''):\n if isinstance(keys, collections.MutableMapping):\n for value in keys:\n flatten_dict(keys[value], \"{}{}{}\".format(name, value, delim))\n elif isinstance(keys, list):\n count = 0\n for value in keys:\n if isinstance(value, collections.MutableMapping):\n flatten_dict(value, \"{}{}{}\".format(name, count, delim))\n else:\n result[name[:-1]] = keys\n count += 1\n else:\n result[name[:-1]] = keys\n\n flatten_dict(data)\n return result", "def unflatten(arg):\n if hasattr(arg, \"iteritems\"):\n items = arg.iteritems()\n elif hasattr(arg, \"items\"):\n items = arg.items()\n else:\n items = arg\n\n data = {}\n holders = []\n for flat_key, val in items:\n parsed_key = _parse_key(flat_key)\n obj = data\n for depth, (key, next_key) in enumerate(zip(parsed_key, parsed_key[1:]), 1):\n if isinstance(next_key, string_type):\n holder_type = _dict_holder\n else:\n holder_type = _list_holder\n\n if key not in obj:\n obj[key] = holder_type(_unparse_key(parsed_key[:depth]))\n holders.append((obj, key))\n elif not isinstance(obj[key], holder_type):\n raise ValueError(\n \"conflicting types %s and %s for key %r\"\n % (\n _node_type(obj[key]),\n holder_type.node_type,\n _unparse_key(parsed_key[:depth]),\n )\n )\n obj = obj[key]\n\n last_key = parsed_key[-1]\n if isinstance(obj.get(last_key), _holder):\n raise ValueError(\n \"conflicting types %s and terminal for key %r\"\n % (_node_type(obj[last_key]), flat_key)\n )\n obj[last_key] = val\n\n for obj, key in reversed(holders):\n obj[key] = obj[key].getvalue()\n\n return data", "def flatten_tree(tree):\n yield dict([\n (k, v)\n for k, v in tree.items()\n if k != 'contents'\n ])\n if 'contents' in tree:\n for x in tree['contents']:\n for y in flatten_tree(x):\n yield y", "def flatten(obj: JSONObject) -> FlatJSON:\n return _flatten({}, tuple(), obj)", "def _unflatten_dict_by_feature_name(flattened_dict: Dict[str, Any]) ->Dict[str, Dict[str, Any]]:\n outputs: Dict[str, Dict[str, Any]] = {}\n for concat_key, tensor_values in flattened_dict.items():\n feature_name = get_feature_name_from_concat_name(concat_key)\n tensor_name = get_tensor_name_from_concat_name(concat_key)\n feature_outputs: Dict[str, Any] = {}\n if feature_name not in outputs:\n outputs[feature_name] = feature_outputs\n else:\n feature_outputs = outputs[feature_name]\n feature_outputs[tensor_name] = tensor_values\n return outputs", "def _flatten_dict(self, obj, prefix=''):\n\n encoded_dict = QueryDict('').copy()\n\n if hasattr(obj, 'items'):\n for key, value in obj.items():\n\n item_key = '%(prefix)s%(key)s' % { 'prefix': prefix, 'key': key }\n\n # Flatten lists for formsets and model choice fields\n if isinstance(value, list):\n for i, item in enumerate(value):\n\n if isinstance(item, dict):\n\n # Flatten nested object to work with formsets\n item_prefix = '%(key)s-%(index)d-' % { 'key': key, 'index': i }\n encoded_dict.update(self._flatten_dict(item, prefix=item_prefix))\n\n # ID for use with model multi choice fields\n id_value = item.get('id', None)\n if 
id_value:\n encoded_dict.update({ key: id_value })\n\n else:\n\n # Value for use with model multi choice fields\n encoded_dict.update({ key: item })\n\n # ID for use with model choice fields\n elif isinstance(value, dict):\n encoded_dict[item_key] = value.get('id', value)\n\n # Keep JavaScript null as Python None\n elif value is None:\n encoded_dict[item_key] = None\n\n # Other values are used directly\n else:\n encoded_dict[item_key] = unicode(value)\n\n return encoded_dict", "def _flatten(object_, key):\n # Empty object can't be iterated, take as is\n if not object_:\n flattened_dict[key] = object_\n # These object types support iteration\n elif isinstance(object_, dict):\n for object_key in object_:\n if not (not key and object_key in root_keys_to_ignore):\n _flatten(object_[object_key], _construct_key(key,\n separator,\n object_key))\n elif isinstance(object_, (list, set, tuple)):\n for index, item in enumerate(object_):\n _flatten(item, _construct_key(key, separator, index))\n # Anything left take as is\n else:\n flattened_dict[key] = object_", "def sub_dict(d):\n r = {}\n for k in d:\n if type(d[k]) in prims:\n r[k] = d[k]\n elif type(d[k]) is list:\n r[k] = sub_list(d[k])\n elif type(d[k]) is dict:\n r[k] = sub_dict(d[k])\n else:\n print \"Unknown Type: {}\".format(type(d[k]))\n return r", "def _flatten_metadata(metadata):\r\n if metadata:\r\n # After changing recursive_keypairs` output we need to keep\r\n # flattening output unchanged.\r\n # Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.')\r\n # output before: a.b:c=d\r\n # output now: a.b.c=d\r\n # So to keep the first variant just replace all dots except the first\r\n return dict((k.replace('.', ':').replace(':', '.', 1),\r\n six.text_type(v))\r\n for k, v in utils.recursive_keypairs(metadata,\r\n separator='.')\r\n if type(v) is not set)\r\n return {}", "def deepupdate(original, update):\n for key, value in original.iteritems():\n if not key in update:\n update[key] = value\n elif isinstance(value, dict):\n deepupdate(value, update[key])\n return update", "def flatten_dict(in_obj: Dict[Any, Any], *, sep: str = '_', key_maker: Callable = None) -> Dict[str, Any]:\n\n if key_maker is None:\n key_maker = sep.join\n out_dict = {}\n for key, obj in in_obj.items():\n\n try:\n\n for inner_key, value in obj.items():\n try:\n\n out_dict.update(\n flatten_dict(\n {(key, inner_key): value},\n sep=sep,\n key_maker=key_maker\n )\n )\n\n except AttributeError:\n out_dict[key_maker(flatten_keys([key, inner_key]))] = value\n\n except AttributeError:\n out_dict[key_maker(flatten_keys(key))] = obj\n\n return out_dict", "def nestedDictValues(d):\n for key in sorted(d.keys()):\n if isinstance(d[key], dict):\n yield from nestedDictValues(d[key])\n else:\n yield d[key]", "def iteritemsdeep(dct):\n for (key, val) in dct.items():\n if isinstance(val, dict):\n for (key_child, val_child) in iteritemsdeep(val):\n yield ((key,) + key_child, val_child)\n else:\n yield ((key,), val)", "def as_dict(self, flat=False):\n if not flat:\n return self.as_dict_nested()\n else:\n return self.as_dict_flat()", "def flatten_hpo_params(params_dict: DictConfig) -> DictConfig:\n\n def process_params(nested_params: DictConfig, keys: list[str], flattened_params: DictConfig) -> None:\n \"\"\"Flatten nested dictionary till the time it reaches the hpo params.\n\n Recursive helper function that traverses the nested config object and stores the leaf nodes in a flattened\n dictionary.\n\n Args:\n nested_params: DictConfig: config object containing the original parameters.\n 
keys: list[str]: list of keys leading to the current location in the config.\n flattened_params: DictConfig: Dictionary in which the flattened parameters are stored.\n \"\"\"\n if len({\"values\", \"min\", \"max\"}.intersection(nested_params.keys())) > 0:\n key = \".\".join(keys)\n flattened_params[key] = nested_params\n else:\n for name, cfg in nested_params.items():\n if isinstance(cfg, DictConfig):\n process_params(cfg, keys + [str(name)], flattened_params)\n\n flattened_params_dict = DictConfig({})\n process_params(params_dict, [], flattened_params_dict)\n\n return flattened_params_dict", "def _deep_asdict(self):\n return {\n \"collections\": {k: p._deep_asdict() for (k, p) in self.collections.items()},\n }", "def flatten(self):\n flattened = {}\n for key, constructs in self.data.iteritems():\n if not constructs:\n continue\n construct_class = constructs[0].__class__ # we shouldn't ever mix\n if len(constructs) == 1:\n flattened[key] = construct_class(key, constructs[0].value, validate_value=False)\n else:\n flattened[key] = construct_class.and_(key, sorted(c.value for c in constructs))\n return flattened", "def flatten_json(object_json, prefix=\"\", flatten_result={}):\n assert(type(object_json) is dict)\n for (key, value) in object_json.items():\n # Because the arrays are not supported the only special case is the\n # nested object\n if type(value) is dict:\n flatten_json(value, build_flattened_key(\n prefix, key), flatten_result)\n else:\n # Treat the values that are specific number cases (Necessary to\n # return a valid json object)\n if value == float(\"inf\"):\n value = \"Infinity\"\n elif value == float(\"-inf\"):\n value = \"-Infinity\"\n\n flatten_result[build_flattened_key(prefix, key)] = value\n\n return flatten_result", "def _join_dicts(dicts):\n if dicts is None: # pragma: no cover\n return\n assembled_dict = {k: v for D in dicts for k, v in D.items()}\n return assembled_dict", "def unflatten(dictionary, sep=\".\"):\n unflattened_dictionary = {}\n for key, value in dictionary.items():\n parts = key.split(sep)\n sub_dictionary = unflattened_dictionary\n for part in parts[:-1]:\n if part not in sub_dictionary:\n sub_dictionary[part] = {}\n sub_dictionary = sub_dictionary[part]\n sub_dictionary[parts[-1]] = value\n return unflattened_dictionary", "def deep_normalize(d):\n if 'sudsobject' in str(d.__class__):\n d = deep_normalize(dict(d))\n elif isinstance(d, dict):\n for k,v in d.iteritems():\n if 'sudsobject' in str(v.__class__):\n #print k, v, '%s' % v.__class__\n r = deep_normalize(dict(v))\n d[k] = r\n elif isinstance(v, dict):\n r = deep_normalize(v)\n d[k] = r\n elif isinstance(v, (list, tuple, )):\n d[k] = [deep_normalize(i) for i in v]\n elif isinstance(v, datetime):\n # per problemi di permessi sugli oggetti datetime trasformo\n # in DateTime di Zope\n d[k] = DateTime(v.isoformat())\n elif isinstance(d, (list, tuple, )):\n d = [deep_normalize(i) for i in d]\n\n return d", "def dict_flatten(*args):\n hold = []\n for a in args:\n hold.append([i for s in a.values() for i in s])\n return hold", "def json_flatten(a, prefix=''):\r\n\r\n def add_flat(dict_, key, elem):\r\n \"\"\"If `elem` is itself a dict, merge it with `dict_`.\r\n Otherwise, store it in `dict_` under `key`.\r\n \"\"\"\r\n\r\n if isinstance(elem, dict):\r\n dict_.update(elem)\r\n else:\r\n dict_[key] = elem\r\n\r\n res = {}\r\n if isinstance(a, list):\r\n for n, elem in enumerate(a):\r\n add_flat(res, prefix, json_flatten(elem, \\\r\n prefix + \"[{0}]\".format(n)))\r\n elif isinstance(a, dict):\r\n for key 
in a.keys():\r\n new_prefix = prefix\r\n # Use a different syntax for keys with spaces.\r\n if ' ' in key:\r\n new_prefix += \"['{0}']\".format(key)\r\n else:\r\n new_prefix += \".{0}\".format(key)\r\n add_flat(res, prefix, json_flatten(a[key], new_prefix))\r\n # If a is not processable by json_flatten (e.g., it's a str) then store\r\n # it in res. However, at the top level we don't want to store such an a\r\n # as {'': a}. We also don't store None in res; we return it instead.\r\n elif a is not None and prefix != '':\r\n res[prefix] = a\r\n else:\r\n res = a\r\n\r\n return res", "def deep_update(d, u):\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collections.abc.Mapping):\n d[k] = v\n elif isinstance(v, collections.abc.Mapping):\n d[k] = deep_update(dv, v)\n else:\n d[k] = v\n return d", "def flatten(self) -> Dict[str, Any]:\n\n self_dict = asdict(self)\n experiment_completed_record_dict = self_dict.pop(\"experiment_completed_record\")\n\n return {\n **self_dict,\n **experiment_completed_record_dict,\n }", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def graph_walk_dict_flat(indict, pre=None):\n pre = pre[:] if pre else []\n if isinstance(indict, dict):\n for key, value in indict.items():\n if isinstance(value, dict):\n for d in dict_generator(value, [key] + pre):\n yield d\n elif isinstance(value, list) or isinstance(value, tuple):\n for v in value:\n for d in dict_generator(v, [key] + pre):\n yield d\n else:\n yield pre + [key, value]\n else:\n yield indict", "def walk_dict(dct):\n for k, v in dct.items():\n yield dct, k, v\n if isinstance(v, dict):\n for d_, k_, v_ in walk_dict(v):\n yield d_, k_, v_", "def flatten_state_dict(state_dict, keep_empty_nodes = False):\n return traverse_util.flatten_dict(\n state_dict,\n is_leaf=tensorstore_leaf,\n keep_empty_nodes=keep_empty_nodes,\n sep='/')", "def test_single_level(self):\n dict_1 = {\n 'key_1': 'original_value_1',\n 'key_2': 'original_value_2'\n }\n dict_2 = {\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3'\n }\n\n result = deep_dict_merge(dict_1, dict_2)\n\n assert dict_1 == {\n 'key_1': 'original_value_1',\n 'key_2': 'original_value_2'\n }\n assert dict_2 == {\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3'\n }\n assert result == {\n 'key_1': 'original_value_1',\n 'key_2': 'new_value_2',\n 'key_3': 'new_value_3',\n }", "def recursive_mapping_update(d, u):\n if u is not None:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = recursive_mapping_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def flatten(params, key=None):\n flat = {}\n for name, val in list(params.items()):\n if key is not None and not isinstance(key, int):\n name = \"%s[%s]\" % (key, name)\n if isinstance(val, dict):\n flat.update(flatten(val, name))\n elif isinstance(val, list):\n flat.update(flatten(dict(enumerate(val)), name))\n elif val is not None:\n flat[name] = val\n return flat", "def flatten(obj, fields):\n # Format each value.\n d = {}\n for key in obj.keys():\n value = 
obj[key]\n if _safe_value(obj, key, value):\n # A safe value that is faithfully representable in YAML.\n d[key] = value\n else:\n # A value that should be edited as a string.\n d[key] = obj.formatted()[key]\n\n # Possibly filter field names.\n if fields:\n return {k: v for k, v in d.items() if k in fields}\n else:\n return d", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def nestedDictKeys(d):\n for key in sorted(d.keys()):\n if isinstance(d[key], dict):\n yield from nestedDictKeys(d[key])\n else:\n yield key", "def merge_dictionaries(d1, d2):\n\n if d2 is None: \n return\n\n for k, v in d2.items():\n if k not in d1:\n d1[k] = dict()\n if isinstance(v, dict):\n merge_dictionaries(d1[k], v)\n else:\n d1[k] = v", "def flatten_array(nested, prefix=''):\n result = dict()\n for index in range(len(nested)):\n prefix_key = '__'.join([prefix, str(index)]) if len(prefix) else str(index)\n\n value = nested[index]\n if isinstance(value, (list, np.ndarray)):\n result.update(flatten_array(value, prefix=prefix_key))\n\n elif isinstance(value, dict):\n result.update(flatten_dict(value, prefix=prefix_key))\n\n else:\n result[prefix_key] = value\n\n return result", "def expand(d):\n # make sure everything is a list\n for k, v in d.iteritems():\n if type(v) is not list:\n d[k] = [v]\n\n # take cross product\n product = [x for x in apply(itertools.product, d.values())]\n return flatten([dict(zip(d.keys(), p)) for p in product])", "def merge_dict(a: dict, b: dict, path=None) -> dict:\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge_dict(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n a[key] = b[key]\n else:\n a[key] = b[key]\n return a", "def _deep_asdict(self):\n return {\n \"metadata\": self.metadata,\n \"artifacts\": {k: a._asdict() for (k, a) in self.artifacts.items()},\n }", "def flatten(self) -> Dict[str, Any]:\n\n self_dict = asdict(self)\n experiment_created_record_dict = self_dict.pop(\"experiment_created_record\")\n generation_strategy_created_record_dict = self_dict.pop(\n \"generation_strategy_created_record\"\n )\n\n return {\n **self_dict,\n **experiment_created_record_dict,\n **generation_strategy_created_record_dict,\n }", "def _flatten(self, value, parent_key=\"\"):\n items = []\n sep = self.header_separator if parent_key else \"\"\n\n if isinstance(value, dict):\n for k, v in value.items():\n # for dict, build a key field_subfield, e.g. title_subtitle\n new_key = parent_key + sep + k\n # skip excluded keys\n if new_key in self.csv_excluded_fields:\n continue\n if self.csv_included_fields and not self.key_in_field(\n new_key, self.csv_included_fields\n ):\n continue\n items.extend(self._flatten(v, new_key).items())\n elif isinstance(value, list):\n for index, item in enumerate(value):\n # for lists, build a key with an index, e.g. 
title_0_subtitle\n new_key = parent_key + sep + str(index)\n # skip excluded keys\n if new_key in self.csv_excluded_fields:\n continue\n if self.csv_included_fields and not self.key_in_field(\n parent_key, self.csv_included_fields\n ):\n continue\n items.extend(self._flatten(item, new_key).items())\n else:\n items.append((parent_key, value))\n\n return dict(items)", "def _unflatten_incr_state(\n self, flat_incr_state: Dict[str, torch.Tensor]\n ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n structured_incr_state = defaultdict(lambda: defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n # Turn the nested defaultdicts back into regular dicts", "def walk_map(d: dict, func: FunctionType):\n out = {}\n for k, v in d.items():\n if isinstance(v, (dict, defaultdict)):\n out[k] = walk_map(v, func)\n else:\n out[k] = func(v)\n return out", "def flatten(item):\n rv=item\n if isinstance(item,dict):\n rv={}\n for key,val in item.iteritems():\n rv[key] = flatten(val)\n elif isinstance(item,(list,tuple)):\n rv = []\n for val in item:\n rv.append(flatten(val))\n if item.__class__ == tuple:\n rv = tuple(rv)\n elif isinstance(item,ndb.Model):\n field_names = item._properties.keys()\n rv = {} \n for k in field_names:\n value = getattr(item, k)\n rv[k] = flatten(value)\n elif isinstance(item,ndb.GeoPt):\n rv = {'lat':item.lat, 'lon':item.lon}\n elif isinstance(item,(datetime.datetime,datetime.time)):\n rv = item.isoformat()\n if not item.utcoffset():\n rv += 'Z'\n elif isinstance(item,(datetime.date)):\n rv = item.isoformat()\n #elif isinstance(item,long):\n # rv = '%d'%item\n elif isinstance(item,(unicode,str,decimal.Decimal)):\n rv = str(item).replace(\"'\",\"\\'\")\n elif isinstance(item,(dict,list,tuple)):\n rv = flatten(item)\n return rv", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def merge_dicts(base, changes):\n for k, v in changes.items():\n if isinstance(v, dict):\n merge_dicts(base.setdefault(k, {}), v)\n else:\n base.setdefault(k, v)", "def recursive_update_cfg(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "def _json_normalize(x: dict) -> dict:\n return json.loads(json.dumps(x))", "def graph_walk_collection_flat(indict, pre=None):\n pre = pre[:] if pre else []\n \n # if isinstance(indict, dict):\n if type(indict) in [dict, OrderedDict]:\n for key, value in indict.items():\n # if isinstance(value, dict):\n if type(value) in [dict, OrderedDict]:\n for d in graph_walk_collection_flat(value, [key] + pre):\n yield d\n # elif isinstance(value, list) or isinstance(value, tuple):\n elif type(value) in [list, tuple]:\n for v in value:\n for d in 
graph_walk_collection_flat(v, [key] + pre):\n yield d\n else:\n yield pre + [key, value]\n else:\n yield indict", "def clean_dict(d):\n\n if not isinstance(d, (dict, list)):\n return d\n if isinstance(d, list):\n return [v for v in (clean_dict(v) for v in d) if v]\n return OrderedDict([(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v])", "def print_dict_tree(d, max_depth=None, indent=0):\n def _recurse(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict) and indent != max_depth:\n print(); _recurse(value, indent + 1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))\n \n return _recurse(d)" ]
[ "0.77844065", "0.7741226", "0.7617096", "0.75616306", "0.7490885", "0.74481535", "0.71848184", "0.7180198", "0.7081588", "0.7011167", "0.6980138", "0.6957304", "0.69023675", "0.6792159", "0.6791752", "0.6770507", "0.67688465", "0.676429", "0.67099506", "0.6697252", "0.66932225", "0.6630275", "0.66176987", "0.6613748", "0.65929025", "0.64913887", "0.6486943", "0.6476371", "0.64426494", "0.6434999", "0.6417603", "0.64126796", "0.6405014", "0.6391311", "0.6347407", "0.6299316", "0.6285216", "0.62031776", "0.6199574", "0.6192594", "0.6164692", "0.61489356", "0.60683525", "0.60627925", "0.6035056", "0.6034848", "0.6019743", "0.6000461", "0.5990432", "0.5981702", "0.5975718", "0.59614795", "0.59538245", "0.5936641", "0.5921347", "0.5905988", "0.5899192", "0.5896515", "0.5894193", "0.58853143", "0.58851355", "0.5870222", "0.5842622", "0.58410096", "0.5839804", "0.5835728", "0.5812298", "0.5806992", "0.58046585", "0.57928", "0.5782821", "0.5770737", "0.57366675", "0.57291305", "0.5724558", "0.5724397", "0.5698602", "0.5682879", "0.5677305", "0.566664", "0.56660306", "0.56645787", "0.56606936", "0.565016", "0.56491995", "0.56470394", "0.5629771", "0.56264806", "0.56201583", "0.5618529", "0.560806", "0.56050223", "0.5585425", "0.55737275", "0.5553014", "0.5550742", "0.5505727", "0.55005574", "0.5496625", "0.5492551", "0.54895264" ]
0.0
-1
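A minimal usage sketch of the dict_path generator from the record above; the sample nested dict and the variable names are made-up illustrations, not part of the dataset.

# Hypothetical input; dict_path yields ("a.b", 1)-style (dotted path, value) pairs.
config = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
flat = dict(dict_path(config))
# flat == {"a.b": 1, "a.c.d": 2, "e": 3}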
Sum of the factorials of the digits of a number x
def factsum(x): return sum(list(map(lambda x: factorial(x), getdigits(x))))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def factorial_digit_sum(n):\n sum = 0\n factStr = str(factorial(n))\n for digit in factStr:\n sum += int(digit)\n return sum", "def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum", "def calculateFactorials():\n\n ni = []\n ni.append( 295232799039604140847618609643520000000) # 34!\n ITERATIONS = 34\n for n in range( 1, ITERATIONS,1 ) :\n ni.append(math.floor(ni[n - 1] / n))\n print( \"\\n \".join([\"xi = (xi * _x) >> PRECISION;\\n res += xi * %s;\" % hex(int(x)) for x in ni]))", "def factorial(x):\r\n res = 1\r\n for i in range (1, x+1)\r\n res *= i\r\n return res", "def factorial(x):\r\n output = 1\r\n for factor in range(2,x+1):\r\n output = output * factor\r\n return output", "def obtain_factorial(x):\n product = 1\n for ii in list(range(x)):\n product = product * (ii + 1)\n\n return(product)", "def factorial(x):\n value = 1\n for i in range(2, add(x, 1)):\n value = multiply(value, i)\n return value", "def calculate(x: int) -> int:\n\n digits = list(map(int, list(str(x))))\n return sum(list(map(lambda a: a**2, digits)))", "def fact(n):\n return float(misc.factorial(n, True))", "def calcularfactorial(n):\r\n fact = 1\r\n for i in range(1, n+1): ## El valor inicial 1 es para que no arranque desde 0 si no desde 1. El valor final es n+1 xq el valor final del range nunca esta incluido\r\n fact = fact * i ## Multiplicamos el fact por el i. I va a valer lo que devuelva el range: 1,2,3,4 etc. Vamos a multiplicar los valores fact partiendo de 1 por todos los valores a recorrer\r\n return fact", "def factorial(x):\n ans = 1\n for i in range(x, 1, -1):\n ans *= i\n return ans", "def fact(n):\n\treturn int(factorial(n, exact=True))", "def foo_6(x): ## calculate the factorial of x in a different way\n\tfacto=1\n\twhile x>=1:\n\t\tfacto=facto*x\n\t\tx=x-1\n\treturn facto", "def digit_sum(x):\n s = 0\n while x>0:\n s = s+(x%10)\n x = x//10\n\n return s", "def Factorial(x):\n # 0) SECURITY CHECK\n if not isinstance(x, int):\n raise ValueError( \"'Factorial' function only accepts integers\" )\n\n # 1) COMPUTE THE FACTORIAL\n if x == 0 or x == 1:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(1, x + 1))", "def digits(x):\n \n if type(x) != int: \n print(\"ERROR <- x in factorial(x) is not type int\")\n return\n \n return [int(i) for i in list(str(x))]", "def task17_factorial(num):\n result = 1\n for i in range(1, num + 1):\n result *= i\n return result", "def factorial(n):\n return reduce(mul, range(1, n), 1)", "def twenty():\r\n \r\n n = 100\r\n factorial = 1\r\n sum = 0\r\n \r\n while n > 0:\r\n factorial *= n\r\n n -= 1\r\n \r\n for c in str(factorial):\r\n sum += int(c)\r\n \r\n return sum", "def factorial(n):\n \n result = 1\n\n for i in range(1,n+1):\n result *= i\n\n return result", "def factorial(n):\n\n # the code for factorial", "def get_factorial_digits(limit):\r\n\r\n factorials = [1]\r\n\r\n digits = [1]\r\n for num in range(1, limit + 1):\r\n new_digits = []\r\n carry = 0\r\n for digit in digits:\r\n digit_product = digit * num + carry\r\n new_digits.append(int(str(digit_product)[-1]))\r\n if digit_product >= 10:\r\n carry = int(str(digit_product)[:-1])\r\n else:\r\n carry = 0\r\n if carry:\r\n new_digits.extend([int(i) for i in reversed(str(carry))])\r\n factorials.append(len(new_digits))\r\n digits = new_digits\r\n\r\n return factorials", "def factorial(n):\n result = 1\n for i in range(1, n + 1):\n result *= i\n return result", "def factorial(n):\n result 
= 1\n for x in range(2, n + 1):\n result = result * x\n\n return result", "def Factorial(n):\n\tx = 1\n\tfor i in range(1, n + 1):\n\t\tx *= i\n\treturn x", "def factorial(n: int) -> int:\n result = 1\n for i in range(1, n+1):\n result *= i\n return result", "def factorial_trailing_zero(n):\n\n count = 0\n idx = 5\n while(n/idx >= 1):\n count += math.floor(n/idx)\n idx *= 5\n\n return count", "def sumDigit():", "def sum_of_digits(n):\n return sum(int(c) for c in str(n))", "def factorial(n):\n if not n>=0:\n \traise ValueError('n must be >=0')\n if math.floor(n)!=n:\n \traise ValueError('n must be exact integer')\n if n+1==n:\n \traise OverflowError(\"n too large\")\n result=1\n factor=2\n while factor<=n:\n \tresult*=factor\n \tfactor+=1\n return result", "def factorial(x):\n factorial = x\n if x <= 1:\n return 1\n else:\n while (x > 1):\n x -= 1\n factorial = multiply(factorial, x)\n return factorial", "def factorial(n):\n\tf = 1\n\tfor i in range(1,n+1):\n\t\tf = f*i\n\n\treturn f", "def factorial(n):\n ret = 1\n for i in range(2, n+1):\n ret *= i\n return ret", "def factorial(n):\r\n temp = 1\r\n for item in range(2, n+1):\r\n temp *= item\r\n return temp", "def factorial(number):\n result = 1\n while number:\n result *= number\n number -= 1\n return result", "def factorial(n):\n if isinstance(n, int) or isinstance(n, float):\n if n <= 1:\n return 1.0\n else:\n return n * factorial(n-1)\n else:\n res = 1.0\n for ni in n:\n res *= factorial(ni)\n return res", "def factorial_recursion(n):\n pass # @todo -fix this", "def factorial(n):\n if n == 0:\n return 1\n else:\n return reduce((lambda x, y: x * y), range(1, n + 1))", "def factorial(n: int) -> int:\n _compute_factorial(n)\n return _factorial_sequence[n]", "def sum_of_digits_in_number(n: int) -> int:\n return sum(int(digit) for digit in str(n))", "def sum_digits(n):\n sum = 0\n while n > 0:\n num = n % 10\n sum += num\n n //= 10\n return sum", "def digitSum ( n ) :\n return sum ( map ( int , str ( n ) ) )", "def factorial(n):\n return product(range(1, n + 1))", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def solution(x):\n if x is None:\n return x\n return total_digits(x)", "def digit_sum(n):\n s = 0\n while n:\n s += n % 10\n n //= 10\n return s", "def divisor_sum(x):\n factors = factorint(x)\n primes = factors.keys()\n powers = factors.values()\n sums = list()\n for i, p in enumerate(list(primes)):\n sums.append(sum([math.pow(p, x) for x in range(list(powers)[i] + 1)]))\n return int(reduce_mul(sums)) - x", "def factorial_loop(n):\n\n pass # @todo -fix this", "def factorial(x):\n\n # check if input value is negative or positive\n if x < 0:\n return print(\"Factorials do not exist for negative numbers.\")\n else:\n y = 1\n for i in range(1, x + 1):\n y = y * i\n return y", "def factorial_trailing_zeroes(n):\n zeroes = 0\n for i in range(5, n + 1, 5):\n while i % 5 == 0:\n zeroes += 1\n i /= 5\n return zeroes", "def factorial(n):\n if n < 0:\n raise ValueError(\"n cannot be negative\")\n result = 1\n for i in range(1, n + 1):\n result = multiply(result, i)\n return result", "def factorial(n: int):\n # base case, reduce must have non-empty list\n if n <= 0:\n return 0\n # use reduce function to multiple elements\n return reduce(lambda x, y: x * y, range(1,n+1))", "def fac(n:int) -> int :\n\n factorial = 1\n while n >= 1:\n factorial *= n\n n -= 1\n return factorial", "def factorial(fac_1):\r\n\tcontador = 1\r\n\tfor i in range(1,fac_1 + 1):\r\n\t\tcontador = (contador)*(i)\r\n\treturn contador", "def 
sum_digits(n):\n num = n\n incTen = 1\n summy = 0\n if num > 10:\n while incTen * 10 < num:\n incTen = incTen * 10\n while incTen >= 10:\n summy += num // incTen\n num = num % incTen\n incTen = incTen // 10\n summy += num\n return summy\n elif num == 10:\n return 1\n else:\n return num", "def calculate_factorial(num: int, factorial_value: int):\n if num <= 1:\n return factorial_value\n\n return calculate_factorial(num - 1, num * factorial_value)", "def factorial(k):\n fact = 1\n for i in range(1, k + 1):\n fact *= i\n return fact", "def factorial(n):\n if n < 0:\n raise Exception(f\"n! for n < 0: n = {n}\")\n prod = 1\n for i in range(1, n+1):\n prod *= i\n return prod", "def factorial(n):\n if isinstance(n, int) == False:\n raise TypeError('n is not integer; n value is {} and n type is {}'.format(n, type(n)))\n if n < 0:\n raise Exception('n must be greater than or equal to zero; n value is {}'.format(n))\n if n == 0:\n return 1\n result = 1\n for i in range(1, n+1):\n result = result * i\n return result", "def digital_sum(n):\n r = 0\n while n:\n r, n = r + n % 10, n // 10\n return r", "def sum_digits(n):\n digits = [int(i) for i in str(n)]\n return sum(digits)", "def sum_digits(n):\n \"*** YOUR CODE HERE ***\"\n count=0\n length=len(str(n))\n last=0\n sum=0\n while count<length:\n last=n%10\n n//=10\n sum+=last\n count+=1\n return sum", "def sum_factors(n):\n return sum(f*n[f] for f in n)", "def task_factorial():\n # set range of factorials here\n lo, hi = 0, 11\n user_digit = get_int(lo, hi) \n solution = n_factorial(user_digit) \n print(\"The factorial of %d is %d\" % (user_digit, solution))", "def factorial(x):\n fact = gamma(x+1)\n return fact", "def factorial(n: int) -> int:\n if n <= 1:\n return 1\n else:\n return n * factorial(n-1)", "def sum_of_digits(n):\n rest_of_num, last_num = split(n)\n if rest_of_num < 10:\n \treturn last_num + rest_of_num\n return last_num + sum_of_digits(rest_of_num)", "def factorial(x):\n if x == 1:\n return x\n else:\n return x * factorial(x - 1)", "def factorial(number):\n if number == 0:\n return 1\n return number * factorial(number - 1)", "def digit_sum(n):\n\treturn sum(int(c) for c in str(n))", "def solve(n=1000):\r\n return str(sum(x**x for x in range(1, n + 1)))[-10:]", "def factorial(n):\r\n if n == 1:\r\n return 1\r\n return n * factorial(n - 1)", "def factorial(x):\n if x == -0.5:\n return math.pi**0.5\n elif x > 0 and x-0.5 == int(x-0.5):\n return x*factorial(x-1)\n else:\n return math.factorial(x)", "def factorial(n):\n if n == 0 or n == 1:\n return n\n return n * factorial(n-1)", "def ndigits(x):\n if type(x) != int: # basic error handling of non-int\n print('The value of x is not an integer')\n return None\n if abs(x) < 10:\n return 1 # single digit case\n x_recursive = int((x - x % 10)/10) # remove last digit using modulo\n return 1 + ndigits(x_recursive)", "def factorial(n):\n if n == 0:\n return 1\n return n * factorial(n - 1)", "def factorial(n):\n if n == 0:\n return 1\n return n * factorial(n - 1)", "def factorial(n: int) -> int:\n assert n >= 0\n if n == 0:\n return 1\n if n == 1 or n == 2: # pylint: disable=consider-using-in\n return n\n return n * factorial(n - 1)", "def factorial(n: int):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)", "def sum_digits(n):\n if (n < 10):\n return n\n else:\n all_but_last, last = split(n)\n return sum_digits(all_but_last) + last", "def factorial(n: int):\n return 1 if n == 0 else factorial(n-1)*n", "def fact(n):\n answer = 1\n while n > 1:\n answer *= n\n n -= 1\n return answer", 
"def factorial(n):\n if n == 1:\n return 1\n\n return n * factorial(n - 1)", "def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))", "def get_factorial(number):\n if number == 1:\n return 1\n else:\n return number * get_factorial(number - 1)", "def factorial (n):\n if n == 1:\n return 1\n \n \n else:\n return n * factorial(n - 1)", "def factorial(number):\n\n if number == 1:\n return number\n\n return number * factorial(number-1)", "def _compute_factorial(n: int) -> None:\n\n fact_count = len(_factorial_sequence)\n\n # have the terms up to n! already been computed?\n if n < fact_count:\n return\n\n # compute numbers iteratively from existing sequence\n product = _factorial_sequence[-1]\n for i in range(fact_count, n + 1):\n product *= i\n _factorial_sequence.append(product)", "def combinations(n) -> float:\r\n c = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))\r\n return c", "def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)", "def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)", "def factorial(N: int) -> int:\n return N*factorial(N-1) if N else 1", "def factorial(n):\n product = 1\n for i in range(n, 1, -1):\n product *= i\n return product", "def test_factorial():\n import math\n for number in range(500):\n assert factorial(number) == math.factorial(\n number), \"Could not calculate the factorial of {}\".format(number)", "def add_digits(n):\n return sum([int(d) for d in str(n)])", "def sum_of_digit_powers_in_number(n: int, power: int) -> int:\n return sum(int(digit) ** power for digit in str(n))", "def calculate_digits_sum(number: int) -> int:\n\n # Return value\n ret = 0\n\n while number != 0:\n # Extract the last digit number and add it to ret\n ret += number % 10\n\n # Delete the last digit of the number\n number //= 10\n\n return ret", "def factorial(n):\n if n == 1:\n return n\n else:\n return n*factorial(n-1)", "def digits(n, base=10):\n if n == 0:\n return 1\n\n n = abs(n)\n if base != 10:\n digits = math.log(n, base)\n else:\n digits = math.log10(n)\n return int(digits) + 1", "def _factorial(self, n, cache=[1, 1]):\r\n if n < len(cache):\r\n return cache[n]\r\n else:\r\n last = len(cache) - 1\r\n total = cache[last]\r\n\r\n for i in range(last + 1, n + 1):\r\n total *= i\r\n cache.append(total)\r\n\r\n return total" ]
[ "0.7923695", "0.75797725", "0.75408524", "0.7531802", "0.7449112", "0.73583555", "0.7314269", "0.7309004", "0.7186043", "0.7158655", "0.71404076", "0.7107824", "0.70873946", "0.70631707", "0.7045362", "0.7043718", "0.7035779", "0.70124704", "0.6972245", "0.6952173", "0.6941045", "0.6922352", "0.69098485", "0.69054556", "0.69030523", "0.68970525", "0.68852866", "0.6874781", "0.6865312", "0.6847531", "0.6839392", "0.68275917", "0.6799887", "0.67896354", "0.67842466", "0.67742074", "0.67578", "0.674113", "0.6726172", "0.6714583", "0.67117923", "0.66964", "0.6687498", "0.6674035", "0.66720694", "0.66712606", "0.6668017", "0.66629875", "0.66548616", "0.6638521", "0.6634087", "0.6633231", "0.6625492", "0.66182727", "0.6595376", "0.65760344", "0.6573062", "0.65697414", "0.6568366", "0.6567582", "0.6540747", "0.6509279", "0.65077525", "0.6489906", "0.6488933", "0.6477583", "0.6477013", "0.64714235", "0.64708835", "0.64677805", "0.645072", "0.6446253", "0.6439364", "0.6435721", "0.64342123", "0.6427472", "0.6427472", "0.6415301", "0.6414136", "0.64005005", "0.6385865", "0.6369786", "0.636188", "0.6358329", "0.63550955", "0.63536006", "0.6349985", "0.63319236", "0.6331493", "0.63298345", "0.63298345", "0.63276595", "0.63241553", "0.6321924", "0.63209385", "0.63167226", "0.6314949", "0.63109636", "0.62959677", "0.6290196" ]
0.8542544
0
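For reference, a hedged sketch of the dependencies the factsum record assumes: factorial (available in math) and a getdigits helper whose definition is not shown in the record, so its body here is an assumption, followed by a worked check.

from math import factorial

def getdigits(x):
    # Assumed behaviour: the decimal digits of x, e.g. getdigits(145) -> [1, 4, 5].
    return [int(d) for d in str(x)]

def factsum(x): return sum(list(map(lambda x: factorial(x), getdigits(x))))

# Worked check: factsum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145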
Show portfolio of stocks
def index():
    user_id = session.get('user_id')
    table_name = f'stocks_user{user_id}'
    db.execute("CREATE TABLE IF NOT EXISTS ? (stock_symbol TEXT NOT NULL, shares NUMBER NOT NULL, price NUMBER NOT NULL, time TEXT NOT NULL)", table_name)
    money = db.execute("SELECT dinheiro FROM users WHERE id = ?", user_id)[0]['dinheiro']
    total_value_in_stocks = 0
    rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol="DINHEIRO" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)
    for row in rows:
        row["company_name"] = lookup(row["stock_symbol"])['name']
        row["price_stock"] = lookup(row["stock_symbol"])['price']
        row["shares"] = db.execute("SELECT SUM(shares) FROM ? WHERE stock_symbol = ?", table_name, row["stock_symbol"])[0]["SUM(shares)"]
        total_value_in_stocks += row["shares"] * row["price_stock"]
    portfolio_value = total_value_in_stocks + money
    return render_template('index.html', rows=rows, money=money, portfolio_value=portfolio_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n total_value = cash_balance\n\n for sym in symbol_list:\n symbol = sym[\"stock_symbol\"]\n new_stock = Stock(username, symbol)\n stocks.append(new_stock)\n total_value += new_stock.quantity * new_stock.price\n\n\n return render_template(\"portfolio.html\", stocks = stocks, cash_balance=usd(cash_balance), total_value=usd(total_value))", "def index():\n # Use a place holder ':curr_id' to call the session id which is the user's id\n rows = db.execute(\"SELECT stocks.symbol, stocks.name, portfolio.shares FROM portfolio JOIN users ON users.id = portfolio.user_id JOIN stocks ON portfolio.stock_id = stocks.id WHERE users.id==:curr_id\", curr_id=session[\"user_id\"])\n # Make a select query only on cash to be able to display it in portfolio's table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:curr_id\", curr_id=session[\"user_id\"])\n\n # gets the current price of each stock queried\n if rows:\n for r in rows:\n r_shares = r[\"shares\"]\n r_symbol = r[\"symbol\"]\n # run lookup function to get current price\n dict_2 = lookup(r_symbol)\n # Adds the key \"price\" and its value to the dictionary \"rows\"\n r[\"price\"] = dict_2[\"price\"]\n # Calculates the grand total (stocks’ total value plus cash)\n total = sum([r[\"price\"]*r[\"shares\"] for r in rows]) + row_cash[0][\"cash\"]\n return render_template(\"portfolio.html\", rows=rows, row_cash=row_cash, total=total)", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = 
db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def history():\n \"\"\"Show portfolio of stocks\"\"\"\n all_rows = []\n rows = db.execute(\"SELECT * FROM history WHERE id = :id\",\n id=session['user_id'])\n if rows==None or len(rows) < 1:\n return render_template(\"history.html\", all_rows=all_rows)\n else:\n for row in rows:\n share_row = []\n share_row.append(row[\"symbol\"])\n share_row.append(row[\"shares\"])\n share_row.append(usd(row[\"price\"]))\n share_row.append(row[\"transacted\"])\n all_rows.append(share_row)\n return render_template(\"history.html\", all_rows=all_rows)", "def index():\n\n # Get user's cash\n user = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n # Get portfolio\n portfolios = db.execute(\"SELECT * FROM portfolios WHERE user_id = ?\", session[\"user_id\"])\n\n # Get symbol for each stock\n length = len(portfolios)\n for i in range(length):\n symbol = portfolios[i]['stocks']\n\n # Lookup stock price and add to portfolio information\n portfolios[i]['price'] = lookup(symbol)['price']\n portfolios[i]['total'] = float(portfolios[i]['price']) * portfolios[i]['shares']\n\n # Calculate total value of stocks\n value = 0\n for j in range(length):\n value += portfolios[j]['price']\n\n # Calculate grand total of stocks plus cash\n g_total = user[0][\"cash\"] + value\n\n return render_template(\"index.html\", portfolios=portfolios, cash=user[0][\"cash\"], g_total=g_total)", "def __display_portfolio(self, p, w):\n\n global st_sort_key\n global st_reverse_sort\n\n line = 1\n total_assets = 0\n total_change = 0\n\n p.assets.sort(key=st_sort_key, reverse=st_reverse_sort)\n\n for s in p.assets:\n # Make sure we have space to write the portfolio totals.\n if line >= (curses.LINES - 3):\n break\n\n total_assets += (p.asset_counts[s.symb()] * s.price())\n total_change += (p.asset_counts[s.symb()] * s.change())\n\n # Color red/green for stocks going up/down.\n change_color = curses.color_pair(0)\n if s.change() > 0:\n change_color = curses.color_pair(1)\n elif s.change() < 0:\n change_color = curses.color_pair(2)\n\n direction = ''\n if 
s.change() > 0:\n direction = u'\\u25b2'\n elif s.change() < 0:\n direction = u'\\u25bc'\n\n w.addstr(line, 0, '%-15s' % s.name()[0:14])\n w.addstr(line, 16, '%-5s' % s.symb(), curses.A_BOLD)\n w.addstr(line, 22, '%9.2f' % s.price())\n w.addstr(line, 32, direction.encode('utf-8'), change_color)\n w.addstr(line, 33, '%6.2f %5.2f%%' % (abs(s.change()),\n abs(s.change_percent()) *\n 100),\n change_color)\n w.addstr(line, 47, '|')\n w.addstr(line, 49, '%-6d' % p.asset_counts[s.symb()])\n w.addstr(line, 56, '%11.2f' % (p.asset_counts[s.symb()] *\n s.price()))\n w.addstr(line, 68, '%10.2f' % (p.asset_counts[s.symb()] *\n s.change()),\n change_color)\n\n line += 1\n\n line += 1\n\n # Get overall change (of assets) for the portfolio.\n overall_change = total_assets - p.cost_basis()\n overall_color = curses.color_pair(0)\n if overall_change > 0:\n overall_color = curses.color_pair(1)\n elif overall_change < 0:\n overall_color = curses.color_pair(2)\n\n # Color red/green for assets changing.\n change_color = curses.color_pair(0)\n if total_change > 0:\n change_color = curses.color_pair(1)\n elif total_change < 0:\n change_color = curses.color_pair(2)\n\n # Print accumulated stats for the portfolio.\n w.addstr(line, 0, 'Daily:')\n w.addstr(line, 8, '$%.2f' % total_change,\n curses.A_BOLD | change_color)\n w.addstr(line, 23, 'Total:')\n w.addstr(line, 30, '$%.2f' % overall_change,\n curses.A_BOLD | overall_color)\n w.addstr(line + 1, 0, 'Assets:')\n w.addstr(line + 1, 8, '$%.2f' % total_assets)\n w.addstr(line + 1, 23, 'Cash: $%.2f' % p.cash)\n w.addstr(line + 1, 44, 'Total value:')\n w.addstr(line + 1, 58, '$%.2f' % (p.cash + total_assets),\n curses.A_BOLD)", "def portfolio_view(request):\n\n try:\n query = request.dbsession.query(Stock)\n user_entries = query.filter(Stock.account_id == request.authenticated_userid)\n except DBAPIError:\n return DBAPIError(DB_ERR_MSG, content_type='text/plain', status=500)\n\n return {'stocks': all_entries}", "def index():\n user_id = session[\"user_id\"]\n portfolio_table = port(user_id, db)\n \n if not isinstance(portfolio_table, dict): \n return apology(\"Error in portfolio\")\n \n return render_template(\"portfolio.html\",\n shares_list = portfolio_table[\"shares\"],\n cash = portfolio_table[\"cash\"],\n total = portfolio_table[\"total\"])", "def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total = float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))", "def display_portfolio(self, p):\n\n if self.terminate:\n return\n\n w = self.windows['MAIN']\n\n self.clear_main()\n 
self.__display_portfolio(p, w)\n self.clear_header()\n self.set_header(p)\n\n self.refresh()", "def portfolio_detail():\n return render_template('portfolio/portfolio.html')", "def index():\n def getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)", "def index():\n\n rows = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n users = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n cash = users[0][\"cash\"]\n total = 0\n\n for row in rows:\n symbol = row[\"symbol\"]\n shares = row[\"shares\"]\n stock = lookup(symbol)\n price_t = float(stock[\"price\"]) * shares\n db.execute(\"UPDATE portfolio SET price=:price WHERE id=:id AND symbol=:symbol\",\n price=float(stock[\"price\"]), id=session[\"user_id\"], symbol=row[\"symbol\"])\n total += price_t\n\n TOTAL = total + cash\n return render_template(\"index.html\", rows=rows, cash=usd(cash), TOTAL=usd(TOTAL))", "def index():\n\n # obtain cash info from users database\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])\n grandtotal = cash[0][\"cash\"]\n \n # obtain stock info from portfolio database\n stocks = db.execute(\"SELECT symbol, shares FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for 
stock in stocks:\n symbol = str(stock[\"symbol\"])\n shares = int(stock[\"shares\"])\n name = \"\"\n price = \"\"\n total = \"\"\n quote = lookup(symbol)\n stock[\"name\"] = quote[\"name\"]\n stock[\"price\"] = \"{:.2f}\".format(quote[\"price\"])\n stock[\"total\"] = \"{:.2f}\".format(quote[\"price\"] * shares)\n stock[\"grandtotal\"] = quote[\"price\"] * shares\n grandtotal += stock[\"grandtotal\"]\n\n # format grandtotal to force 2 decimal places\n grandtotal = \"{:.2f}\".format(grandtotal)\n \n # render index page with some given values\n return render_template(\"index.html\", stocks = stocks, cash = cash, grandtotal = grandtotal)", "def index():\n stocks = db.execute(\"SELECT Symbol, Company, SUM(NumberOfShares) AS Shares, UnitPrice, SUM(TotalPrice) AS TotalPrice FROM \"\n \"portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n\n symbol = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n\n balance = cash[0][\"cash\"]\n grandTotal = 0\n for stock in stocks:\n grandTotal = grandTotal + stock[\"TotalPrice\"]\n\n grandTotal = grandTotal + balance\n\n return render_template(\"index.html\", stockList=stocks, cash=balance, totalAssets=grandTotal, currentUser=session.get(\"user_id\"))", "def index():\n # Establish userID.\n userID = session[\"user_id\"]\n # Isolate all results from portfolio table for the current user.\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id=:userID\", userID=session[\"user_id\"])\n # Cash for current user (first row, cash column)\n cash = db.execute(\"SELECT cash FROM users WHERE id=:userID\", userID=userID)[0][\"cash\"]\n # Empty list to store stock data as iterating through rows.\n stockData = []\n # Set total for combined stoc value to 0.\n totalAllStocks = 0\n\n # Iterate over rows from portfolio and allocate a row for each stock that has more than 0 owned.\n for row in portfolio:\n if row[\"numOwned\"] != 0:\n stockData.append(row)\n\n # Iterate over rows in stock data and provide value for each column. 
Other values for use in html are already in list from previous loop.\n # Had to play around with usd, once in usd is a str rather than float so usd always has to be post calculations.\n for row in stockData:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"currentPrice\"] = usd(stock[\"price\"])\n row[\"total\"] = usd(row[\"numOwned\"] * stock[\"price\"])\n totalAllStocks += row[\"numOwned\"] * stock[\"price\"]\n # Grand Total is combined stock values and cash value.\n grandTotal = totalAllStocks + cash\n # Return index.html input sources.\n return render_template(\"index.html\", stockData=stockData, cash=usd(cash), totalAllStocks = usd(totalAllStocks), grandTotal=usd(grandTotal))", "def history():\n \n #select user's portfolio\n rows = db.execute(\"SELECT stock, number, trans_price, transaction_stamp FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def stock(request, *args, **kwargs):\n\n mode = 'lines'\n xaxis_title = 'Years'\n date_list = []\n open_list = []\n close_list = []\n low_list = []\n high_list = []\n ticker = request.GET.get('ticker', '')\n year = request.GET.get('year', '')\n month = request.GET.get('month', '')\n\n if month.isdigit():\n month = int(month)\n\n data = Stock.objects.filter(ticker__iexact=ticker).order_by('date')\n if year and year.isdigit():\n if month and month in MONTHS:\n data = data.filter(Q(date__year=year,\n date__month=month))\n xaxis_title = f'{MONTHS[month]} {year}'\n else:\n data = data.filter(Q(date__year=year))\n xaxis_title = year\n\n if not ticker or not data.exists():\n return HttpResponseRedirect('/stocks')\n title = f'{ticker} ({year})' if year else f'{ticker}'\n if data.exists():\n xy_data = data.values('date', 'oopen', 'close', 'low', 'high')\n for item in xy_data:\n date_list.append(item['date'])\n open_list.append(item['oopen'])\n close_list.append(item['close'])\n low_list.append(item['low'])\n high_list.append(item['high'])\n\n figure = {'data': [\n Scatter(x=date_list, y=high_list, mode=mode, name='high',\n opacity=0.8, marker_color='green'),\n Scatter(x=date_list, y=low_list, mode=mode, name='low',\n opacity=0.8, marker_color='red', visible='legendonly'),\n Scatter(x=date_list, y=open_list, mode=mode, name='open',\n opacity=0.8, marker_color='blue', visible='legendonly'),\n Scatter(x=date_list, y=close_list, mode=mode, name='close',\n opacity=0.8, marker_color='orange', visible='legendonly'),\n ], 'layout': {'title': {'text': title, 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center', 'yanchor': 'top'},\n 'yaxis_title': \"Value\", 'xaxis_title': xaxis_title\n }}\n\n plot_div = plot(figure, output_type='div')\n return render(request, \"index.html\", context={'plot_div': plot_div})", "def history():\n # Select stock info for every single stock transaction for the respective user\n rows = db.execute(\"SELECT symbol, shares, price, transacted FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Return template with the list that has each stock transaction info\n return render_template(\"history.html\", rows=rows)", "def render_investip():\n\tlinewidth = 2\n\n\tst.sidebar.markdown('# Dashboard')\n\tstock = st.sidebar.selectbox('Stock:', stocks)\n\n\tstartdd = datetime.datetime(2020, 3, 1)\n\tstartdd = st.sidebar.date_input('start-date', value=startdd)\n\n\tendd = datetime.datetime.now()\n\tendd = st.sidebar.date_input('end-date', value=endd)\n\n\tt0 = stock\n\tt0_ohlc = extract(ticker=t0, start_date=startdd, end_date=endd)\n\tt0_df = 
pd.DataFrame({f'{t0}-Close': t0_ohlc.Close})\n\n\t# st.write(t0_ohlc)\n\tmpf.plot(t0_ohlc, type='candle',volume=True,show_nontrading=False, title=t0, figscale=1.)\n\t# tdf = plot_ticker(t0, df=t0_df, start_date=startdd, end_date=endd)\n\tst.pyplot()\n\n\n\tst.sidebar.markdown('## Stock Correlation')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_corrs')\n\tif stock_returns:\n\t\tst.markdown('## Stock Correlation')\n\t\tstock_selection = st.sidebar.multiselect('Stocks', stocks, def_stocks)\n\t\tplot_stock_correlations(stock_selection, startdd, endd)\n\t\tst.pyplot()\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Returns')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_returns')\n\tif stock_returns:\n\t\tst.markdown('## Stock Returns')\n\t\tst.markdown('''### Daily Stock returns\n[EWMA](https://www.investopedia.com/articles/07/ewma.asp)''')\n\t\tspan = st.sidebar.slider('span', 2, 21, value=5)\n\t\tplot_historical(t0, t0_ohlc, span=span, linewidth=linewidth)\n\t\tst.pyplot()\n\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Volatility')\n\ttrading_context = st.sidebar.checkbox('Enable', value=False, key='cb_volatility')\n\tif trading_context:\n\t\tst.markdown('## Volatility & Risk')\n\t\tst.markdown('''### Daily differences between High & Low\nWe model these ranges with [Inverse Gamma PDF](https://en.wikipedia.org/wiki/Inverse-gamma_distribution).\nGreen lines denote +/- 1 stdev.\n''')\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} High-Low Daily')\n\t\tmmd = t0_ohlc.High - t0_ohlc.Low\n\t\t# mmd.dropna(inplace=True)\n\t\tmmd.plot(color='r', ax=ax[0], lw=linewidth)\n\n\t\tmu, sigma = mmd.dropna().mean(), mmd.dropna().std()\n\t\tzval = 1.#96\n\t\t# TODO: try one-tail limit to get outliers\n\t\t_=ax[0].axhline(y=mu, color='k', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu-zval*sigma, color='g', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu+zval*sigma, color='g', lw=linewidth)\n\n\t\tp95 = mmd.dropna().quantile(.95)\n\t\t_=ax[0].axhline(y=p95, color='b', lw=linewidth, label='p95')\n\t\t_=ax[1].axvline(p95, color='b', lw=linewidth, label='p95')\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t print(invgamma.fit(mmd))\n\t\t sns.distplot(mmd, fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(mmd.values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(mu, color='k', label='mean', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\t\tst.markdown('''### Daily Average True Range (ATR)\nImplementation follows [ATR](https://kodify.net/tradingview/indicators/average-true-range/).\nCheck [Investopedia](https://www.investopedia.com/terms/a/atr.asp) for more info.''')\n\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-High-Low': t0_ohlc.High - t0_ohlc.Low,\n\t\t\tf'{t0}-High-PrevCloseAbs': abs(t0_ohlc.High - t0_ohlc.Close.shift(1)),\n\t\t\tf'{t0}-Low-PrevCloseAbs': abs(t0_ohlc.Low - t0_ohlc.Close.shift(1)),\n\t\t}).max(axis=1)\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-true-range': atr_df,\n\t\t})\n\t\tatr_df[f'{t0}-ATR14'] = atr_df.iloc[:, 0].rolling(14).mean()\n\t\t# st.write(atr_df)\n\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} True Range & SMA14')\n\t\tatr_df.plot(ax=ax[0], lw=linewidth)\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t #print(invgamma.fit(f'{t0}-true-range'))\n\t\t sns.distplot(atr_df[f'{t0}-true-range'], 
fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(atr_df[f'{t0}-true-range'].values[-1], color='b', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(atr_df[f'{t0}-ATR14'].values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\n\n\t# do_strategy_analysis = True\n\tst.sidebar.markdown('## Trading Strategy')\n\tdo_strategy_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_stra')\n\tif do_strategy_analysis:\n\t\tst.markdown('## Trading Strategy')\n\t\tst.markdown('[investopedia](https://www.investopedia.com/articles/active-trading/052014/how-use-moving-average-buy-stocks.asp)')\n\t\tshort_window = st.sidebar.slider('short_window', 2, 21, 3)\n\t\tlong_window = st.sidebar.slider('long_window', 3, 50, 5)\n\t\tplot_strategy(t0, t0_df, short_window, long_window)\n\t\tst.pyplot()\n\n\t# do_corr_analysis = False\n\tst.sidebar.markdown('## Correlation analysis')\n\tdo_corr_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_corr')\n\tif do_corr_analysis:\n\t\tst.markdown('## Correlation analysis')\n\t\tt1= 'GC=F' # # SP500 'GC=F'\n\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\tt1 = st.sidebar.selectbox('REF1:', stocks, index=stocks.index(t1))\n\t\tt2 = st.sidebar.selectbox('REF2:', stocks, index=stocks.index(t2))\n\t\tif st.sidebar.button('Reset'):\n\t\t\tt1 = 'GC=F' # # SP500 'GC=F'\n\t\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\t\t# t1 = st.sidebar.selectbox('ref1:', stocks, index=stocks.index(t1))\n\t\t\t# t2 = st.sidebar.selectbox('ref2:', stocks, index=stocks.index(t2))\n\n\t\t@st.cache(persist=True, show_spinner=False)\n\t\tdef get_dataframes(t1, t2, startdd, endd):\n\t\t\tt1_ohlc = extract(ticker=t1, start_date=startdd, end_date=endd)\n\t\t\tt2_ohlc = extract(ticker=t2, start_date=startdd, end_date=endd)\n\t\t\treturn t1_ohlc, t2_ohlc\n\n\t\tt1_ohlc, t2_ohlc = get_dataframes(t1, t2, startdd, endd)\n\t\tt1_df = pd.DataFrame({f'{t1}-Close': t1_ohlc.Close})\n\t\tt2_df = pd.DataFrame({f'{t2}-Close': t2_ohlc.Close})\n\n\t\t#print(t0_ohlc.shape)\n\t\t#t0_ohlc.head()\n\t\t# print(t1_ohlc.shape)\n\t\t# ticker_ohlc.head()\n\t\t# ticker_ohlc.info()\n\n\t\ttdf = t0_df.join(t1_df).join(t2_df).interpolate().dropna()\n\t\t# tdf.head(10)\n\n\t\t# t0_ohlc.corr(t1_ohlc)\n\t\t#ax = t0_ohlc.Close.plot()\n\t\t#t1_ohlc.Close.plot(ax=ax)\n\n\t\timport numpy as np\n\t\tprint('glocal corrleation1: ', t0_ohlc.Close.corr(t1_ohlc.Close))\n\t\tprint('glocal corrleation2: ', t0_ohlc.Close.corr(t2_ohlc.Close))\n\n\t\tp_window_size = 5\n\t\tr_window_size = 5\n\t\tcentering = False\n\n\n\t\tmodf = lambda x: x\n\t\t#modf = np.log10\n\n\n\t\tmain_stat = f'[{t0}]-mean-roll{p_window_size}'\n\t\talt_stat_1 = f'[{t1}]-mean-roll{p_window_size}'\n\t\talt_stat_2 = f'[{t2}]-mean-roll{p_window_size}'\n\t\t# df_rc = pd.DataFrame({\n\t\t# main_stat : tdf.iloc[:, 0].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_1: tdf.iloc[:, 1].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_2: tdf.iloc[:, 2].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# })\n\t\tcom_val = 0.2\n\t\tdf_rc = pd.DataFrame({\n\t\t main_stat : tdf.iloc[:, 0].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_1: tdf.iloc[:, 1].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_2: tdf.iloc[:, 2].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t})\n\n\t\tdf_rc = df_rc.interpolate()\n\t\tdf_rc[f'[{t0}]-[{t1}]-corr-roll{r_window_size}'] = 
df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_1])\n\t\tdf_rc[f'[{t0}]-[{t2}]-corr-roll{r_window_size}'] = df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_2])\n\n\t\tf, ax = plt.subplots(3,1,figsize=(16,10),sharex=True)\n\t\t#df_rc.iloc[:,0].plot(ax=ax[0], legend=True)\n\t\tdf_rc.iloc[:,1].plot(ax=ax[0], legend=True, color='gold')\n\t\tdf_rc.iloc[:,2].plot(ax=ax[1], legend=True, color='darkred')\n\t\tdf_rc.iloc[:,3].plot(ax=ax[2], legend=True, color='gold')\n\t\tdf_rc.iloc[:,4].plot(ax=ax[2], legend=True, color='darkred')\n\t\tax[2].axhline(y=0, lw=1, color='black')\n\t\t#t0_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[0])\n\t\t#t1_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[1])\n\t\t# ax[0].set(xlabel='Frame',ylabel='Smiling Evidence')\n\t\t# ax[1].set(xlabel='Frame',ylabel='Pearson r')\n\t\t_=plt.suptitle(f\"{t0} Close rolling correlation to {t1}, {t2}\")\n\n\t\tst.pyplot()\n\n\n\t\tf,ax=plt.subplots(1, 2, figsize=(16,8),sharex=False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[1],\n\t\t y=df_rc.columns[2],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=None,\n\t\t ax=ax[0])\n\n\t\tprint(df_rc.columns)\n\t\tnewr_p = df_rc.iloc[-1, 0]\n\t\tt1_p = df_rc.iloc[-1, 1]\n\t\tt2_p = df_rc.iloc[-1, 2]\n\t\tt1_c = df_rc.dropna().iloc[-1, 3]\n\t\tt2_c = df_rc.dropna().iloc[-1, 4]\n\t\tprint('current_corr:', (t1_c, t2_c))\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, 1].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle = plt.Circle((t1_p, t2_p), xradius, color='r', fill=False)\n\t\tax[0].add_artist(circle)\n\t\t#ax[0].set_xlabel(f'GOLD Price {t1_p:.4f}')\n\t\t#ax[0].set_ylabel(f'OIL Price {t2_p:.4f}')\n\t\t# ax[0].legend().set_visible(False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[-2],\n\t\t y=df_rc.columns[-1],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=True,\n\t\t #linestyle=\n\t\t ax=ax[1])\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, -2].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle1 = plt.Circle((t1_c, t2_c), xradius, color='r', fill=False)\n\t\tax[1].add_artist(circle1)\n\t\t#ax[1].set_ylabel('OIL Correlation')\n\t\t#_= ax[1].set_xlabel('GOLD Correlation')\n\n\n\t\tst.pyplot()", "def stock_view(request):\n if request.method == 'GET':\n try:\n symbol = request.GET['symbol']\n except KeyError:\n return {}\n try:\n response = requests.get(API_URL + '/stock/{}/company'.format(symbol))\n data = response.json()\n return {'company': data}\n except ValueError:\n raise HTTPNotFound()\n if request.method == 'POST':\n try:\n symbol = request.POST['symbol']\n except KeyError:\n raise HTTPBadRequest()\n\n try:\n response = requests.get(API_URL + '/stock/{}/company'.format(symbol))\n data = response.json()\n except ValueError:\n raise HTTPNotFound()\n\n isntance = Stock(**data)\n\n try:\n request.dbsession.add(instance)\n except DBAPIError:\n return Response(DB_ERR_MSG, content_type='text/plain', status=500)\n \n return HTTPFound(location=request.route_url('portfolio'))", "def history():\n\n # obtain stock info from portfolio database\n history = db.execute(\"SELECT symbol, shares, price, date FROM history WHERE id = :id ORDER BY date DESC\", 
id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for transaction in history:\n symbol = transaction[\"symbol\"]\n shares = transaction[\"shares\"]\n price = transaction[\"price\"]\n date = transaction[\"date\"]\n\n return render_template(\"history.html\", history = history)", "def index():\n # Selects stock that user actually has\n stockuserhas = db.execute(\n \"SELECT symbol, shares FROM portfolio WHERE userid = :userid GROUP BY symbol HAVING SUM(shares) > 0\", userid=session[\"user_id\"])\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # If the user does not have any stocks, return index using with just money as input\n if not stockuserhas:\n return render_template(\"index.html\", money=money, completetotal=money)\n\n # Selects summarative information for each symbol\n stocks = db.execute(\n \"SELECT SUM(total), symbol, SUM(shares), name FROM portfolio WHERE userid = :userid GROUP BY symbol\", userid=session[\"user_id\"])\n # For each symbol, add the current price of the stock to the end of the dictionary\n for stock in stocks:\n # Looks up current price of stock based on symbol\n stockinfo = lookup(stock[\"symbol\"])\n # Finds current value of stock\n currentprice = float(stockinfo[\"price\"])\n # Adds the price to the dictionary\n stock.update({\"price\": currentprice})\n\n # The total value of stocks user owns\n totalstockvalue = db.execute(\"SELECT SUM(total) FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Total amount a user owns is the cash they have plus the sum of the stocks\n completetotal = float(money + float(totalstockvalue[0]['SUM(total)']))\n # Return index.html with all of the information put together above\n return render_template(\"index.html\", completetotal=completetotal, money=money, stocks=stocks)", "def portfolio(self):\n self.update_portfolio()\n return self._immutable_portfolio", "async def list(self, ctx, user=None, date=None):\n if not user:\n user = ctx.message.author\n else:\n user = util.GetUserFromNameStr(ctx.message.server.members, user)\n change = GetPortfolioChange(user.id)\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n await self.bot.say(\n '```%s\\'s portfolio:\\n'\n 'Total Value: $%s (%.2f%s) \\n'\n '%s```' % (user, portfolio.Value(), change, \"%\", portfolio.AsTable()))", "def history():\n\n #Get the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT * FROM history WHERE id = :id\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n return render_template(\"history.html\", ports=ports)", "def index():\n\n rows = db.execute(\n 'SELECT symbol, SUM(CASE WHEN operation = \"SELL\" THEN -shares ELSE shares END) shares FROM transactions WHERE id = :id GROUP BY symbol;', id=session['user_id'])\n\n cash = 
db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n grand_total = cash\n\n for row in rows:\n stock = lookup(row['symbol'])\n\n row['name'] = stock['name']\n row['price'] = stock['price']\n row['total'] = row['shares'] * stock['price']\n\n grand_total += row['shares'] * stock['price']\n\n rows.append({\n 'symbol': 'CASH',\n 'cash': cash,\n 'total': grand_total\n })\n\n return render_template('index.html', stocks=rows)", "def print_portfolio(self):\n self.__validate_google_credentials()\n sheet = self.service.spreadsheets()\n result = sheet.values().get(spreadsheetId=self.google_spreadsheet_id,\n range=SAMPLE_RANGE_NAME).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('pulled data:')\n print('----------------')\n print('ALL VALUES\\n', '-----------------\\n', values)\n print('ONLY PRICES\\n', '----------------')\n print('{:25} {}'.format('name', 'price'))\n \n for row in values:\n if len(row) < 8 or row[2] != '[OWN]':\n continue\n else:\n print('{:25} {}'.format(row[0], row[6]))", "def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)", "def index():\n#Get the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY symbol\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n #Hold value is the sum of the shares * price of each shares in the portfolios PLUS the remaining cash.\n if grand_total_fl != None:\n hold_value = grand_total_fl + remaining_cash\n #Update hte current hold value of the user\n db.execute(\"UPDATE users SET hold_value = :hold_value WHERE id = :id\", id=session[\"user_id\"], hold_value=hold_value)\n else:\n hold_value = remaining_cash\n\n\n #Query for the symbol in the database for the specific user.\n rows = db.execute(\"SELECT symbol, stock_price FROM portfolio WHERE id = :id GROUP by symbol\", id=session[\"user_id\"])\n\n #Initiate a list for all the open prices of stocks of a certain user.\n price_open = []\n num_stocks = []\n symbol_list = []\n avg_open_list = []\n profit_loss_list = []\n price_today_list = []\n\n\n for i in range(len(rows)):\n print(rows[i]['symbol'])\n symbol = rows[i]['symbol']\n open_price = rows[i]['stock_price']\n print(rows[i]['stock_price'])\n stock = lookup(rows[i]['symbol'])\n price_today = stock['price']\n\n #Insert data into the price_open list\n price_open.insert(i, open_price)\n\n #Count the number of stocks in posession\n share_total = ports[i]['sharetotal']\n\n #Insert data into the num_stocks list\n num_stocks.insert(i, share_total)\n\n #Insert data into the symbol_list list\n symbol_list.insert(i, symbol)\n\n #Insert data into the 
price_today_list\n price_today_list.insert(i, price_today)\n\n #Compute for the average open price of all stocks of a certain user.\n total_price = ports[i]['total']\n avg_open = total_price/share_total\n avg_open_list.insert(i, avg_open)\n\n profit_loss = ((price_today - avg_open)/avg_open)*100\n\n profit_loss_list.insert(i, (profit_loss))\n\n\n db.execute(\"UPDATE portfolio SET price_today = :price_today, profit_loss = :profit_loss, avg_open = :avg_open WHERE symbol = :symbol AND id = :id\", price_today=price_today, symbol=symbol,profit_loss=profit_loss, avg_open=avg_open, id=session[\"user_id\"])\n\n\n print(\"The symbols are:\", symbol_list)\n print(\"The quantity are: \", num_stocks)\n print(\"The open prices are: \", price_open)\n print(\"The average open prices are: \", avg_open_list)\n print(\"The prices today are: \", price_today_list)\n print(\"The profit and loss are: \", profit_loss_list)\n\n return render_template(\"index.html\", ports=ports, remaining_cash = remaining_cash, hold_value=hold_value,)", "def get_stock(self, investor):\n\n # Find out the stock details \n sym, qty, price = investor.portfolios[0].portfolios[0]\n # p = investor.portfolios[0]\n \n # Check if broker has a portfolio\n if self.portfolios[0]:\n self.portfolios[0].add_stock(sym, qty, price)\n else:\n # Broker doesn't have a portfolio\n p = Portfolio()\n #logging.info(\"p is: %s\" % p)\n p.add_stock(sym, qty, price)\n self.add_portfolio(p)\n logging.info(\"Broker's portfolios AFTER addition: %s\" % self)\n # logging.info(\"WHAT ARE YOU\")\n logging.info(\"Investor portfolio BEFORE removal: %s\" % investor.portfolios[0].portfolios)\n investor.portfolios[0].remove_stock(sym, qty)\n logging.info(\"Investor portfolio AFTER removal: %s\" % investor.portfolios[0])\n # investor.portfolios[0].portfolios.remove( (sym, qty, price) )\n \n # investor.portfolios[0].remove(sym, qty, price)\n total_price = qty * price\n investor.portfolios[0].value -= total_price\n investor.cash += qty * float(price)", "def stocks(request):\n\n try:\n stocks = StockList.objects.all()\n except StockList.DoesNotExist:\n stocks = None\n\n context = {\n 'title': 'Filter Stocks',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stocks': stocks,\n }\n\n return render(\n request,\n 'app/stocksview.html',\n context,\n )", "def display_artist_available_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_available_artwork_from_one_artist(artist_name)\n if results:\n for piece in results:\n print(piece)\n else:\n print('Sorry this artist does not have any available art at this time ')\n else:\n print('Sorry, no artwork from this artist to display ')", "def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock", "def index():\n symbols = db.execute(\"SELECT symbol FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n companies = db.execute(\"SELECT company FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n get_shares = db.execute(\"SELECT SUM(shares) FROM History WHERE id = :id GROUP BY symbol\", id=session['user_id'])\n shares = [share['SUM(shares)'] for share in get_shares]\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n return render_template(\"index.html\", symbols_companies_shares=zip(symbols, companies, shares), lookup=lookup, cash=cash)", "def index():\n\n # Get user\n user = session[\"user_id\"]\n\n # Query infos from database\n rows = db.execute(\"SELECT * FROM stocks WHERE user_id = 
:user\", user=user)\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n total_cash = cash\n\n # Populate stocks list wit hstock data\n stocks = []\n for index, row in enumerate(rows):\n stock_data = lookup(row['symbol'])\n stock_data['amount'] = row['amount']\n stock_data['quantity'] = round(stock_data['price'] * stock_data['amount'], 2)\n\n # Generate index table data\n stocks.append(list((\n stock_data['symbol'],\n stock_data['name'],\n stock_data['amount'],\n stock_data['price'],\n stock_data['quantity']\n )))\n total_cash += stocks[index][4]\n\n return render_template(\"index.html\", stocks=stocks, cash=round(cash, 2), total=round(total_cash, 2))", "def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)", "def backtest_portfolio(self):\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def history():\n\n user = session.get(\"user_id\")\n rows = db.execute(\"Select TransDate as Date, Stock, Price, case when Num < 0 then 'Sell' else 'Buy' end as Type, Num as Quantity from portfolio where User = :User order by Date asc\", User = session.get(\"user_id\"))\n\n\n return render_template(\"hist.html\", rows = rows)", "def graph(stock):\n output=stock_price(stock)\n return 
plt.plot(output)", "def stock():\n # Grab the inputs arguments from the URL\n # This is automated by the button\n args = flask.request.args\n\n # Get all the form arguments in the url with defaults\n if 'company' in args.keys() and args['company']:\n company = args['company']\n else:\n company = 'GOOG'\n\n cl = requests.get(\"https://www.quandl.com/api/v3/datasets/WIKI/%s.json?order=asc&rows=31&start_date=2015-07-01&end_date=2015-09-03\" % (company))\n if cl.status_code == 200:\n \tc2=cl.content\n \tstock=simplejson.loads(c2)\n \tabb=stock['dataset']['dataset_code']\n \tdatanames=stock['dataset']['column_names']\n \tdata=stock['dataset']['data']\n \tdataorg=pd.DataFrame(data,columns=datanames)\n \tdataorg['Date']=pd.to_datetime(dataorg['Date'])\n else:\n ######## THIS IS NOT RECOMMENDED, because now it just returns an error message if not find the ticker.\n return 'Error! Ticker does not exist!'\n\n\n # Create a graph\n fig = figure(x_axis_type=\"datetime\")\n fig.line(dataorg.Date,dataorg.Close)\n fig.title=\"Stock closing price (%s), from 07-01-2015 \" % (company)\n # fig.xaxis_axis_label='Date'\n # fig.yaxis_axis_label='Price'\n\n # Configure resources to include BokehJS inline in the document.\n # For more details see:\n # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#module-bokeh.resources\n plot_resources = RESOURCES.render(\n js_raw=INLINE.js_raw,\n css_raw=INLINE.css_raw,\n js_files=INLINE.js_files,\n css_files=INLINE.css_files,\n )\n\n # For more details see:\n # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components\n script, div = components(fig, INLINE)\n html = flask.render_template(\n 'embed.html',\n plot_script=script, plot_div=div, plot_resources=plot_resources,\n # color=color,\n company=company\n )\n return encode_utf8(html)", "def stock(request, stock_id):\n stock= Stock.objects.get(id=stock_id)\n entries= stock.entry_set.order_by('-date_added')\n context= {'stock': stock, 'entries': entries}\n return render(request, 'stock_trackers/stock.html', context)", "def home():\n stocks = preprocess()\n\n return render_template(\"main.html\",stocks=stocks)", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def stock_volume_history(stock_values):\n ticker = stock_values.name\n dates = stock_values.index\n \n # stock volume plot \n p2hover = HoverTool(tooltips=[(\"volume\", \"$y\"),])\n\n p = figure(x_axis_type = \"datetime\")\n\n p.title = \"{} Daily Volume\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Kilo Transactions'\n p.yaxis.axis_label_text_font_size = '9'\n p.yaxis[0].formatter = PrintfTickFormatter(format=\"%3d\")\n\n 
p.quad(top=stock_values['Volume'], bottom=0, left=dates, right=dates,\n fill_color=\"#036564\", line_color=\"#033649\")\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 30'],\n color='#dfbd4d', **line_style)\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 300'],\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 200\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n return p", "def stockButtonClicked(self):\n # Clear text edit box and get the stock symbol from combobox.\n self.central.text3.clear()\n stocksymbol = self.central.combobox.currentText()\n\n URL = 'https://finance.yahoo.com/quote/{0}/profile?p={0}'.format(stocksymbol)\n\n # Safely get the web page using the above URL.\n try:\n r = requests.get(URL)\n except:\n logging.error(\"Failed to get the web page: \" + URL)\n self.central.text3.setText(\"Failed to get the web page: \" + URL)\n return\n\n # Safely turn the response from requests into soup.\n try:\n html = r.text.encode('utf-8')\n soup = bs4.BeautifulSoup(html, 'lxml')\n except:\n logging.error(\"Failed on the soup\")\n self.central.text3.setText(\"Failed on the soup\")\n return\n\n # Safely extract data from the table.\n try:\n table = soup.find_all(\"table\")\n rows = table[0].find_all('tr')\n data = []\n for row in rows:\n cols = row.find_all('td')\n cols = [str.text.strip() for str in cols]\n data.append([str for str in cols if str])\n\n textdisplay = ''\n\n for x in data:\n for y in x:\n print(y)\n textdisplay += y\n textdisplay += '\\n'\n if y.isdigit():\n textdisplay += '\\n'\n self.central.text3.setText(textdisplay)\n\n except:\n logging.error(\"Failed to extract data from the table\")\n self.central.text3.setText(\"Failed to extract data from the table\")\n return\n\n self.updateGraph(symbol=stocksymbol)", "def get_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n date = request.args.get('date')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if not util.is_valid_date_string(date):\n return util.build_json_response(\"Not a valid date of the form YYYY-MM-DD\")\n\n following_date = util.add_days_to_date(date, 1)\n equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).all()\n\n result = dict()\n for equity in equities:\n result[equity[0]] = equity[1]\n\n return util.build_json_response(\"Portfolio retrieved\", equities=result)", "def index():\n user_name = db.execute(\"SELECT username FROM users WHERE id = ?\", session[\"user_id\"])\n check = db.execute(\"SELECT name FROM main.sqlite_master WHERE type='table'\")\n #print(check)\n #print('stocks' not in check[0]['name'])\n if not any(c['name'] == 'stocks' for c in check):\n return render_template(\"index.html\", user_name=user_name)\n\n stocks = db.execute(\"SELECT * FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n total_value = user_name[0][\"cash\"]\n sum_stocks = db.execute(\"SELECT symbol, ammount FROM stocks WHERE user_id = ?\", session[\"user_id\"])\n\n 
for stock in sum_stocks:\n total_value += stock[\"ammount\"] * lookup(stock[\"symbol\"])['price']\n \n #print(stocks)\n return render_template(\"index.html\", stocks=stocks, user_name=user_name, cash=usd(cash[0]['cash']), total_value=usd(total_value))", "def index():\n\n if request.method == \"GET\":\n\n current_user = session[\"user_id\"]\n current_cash=db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n\n # portfolio_table=\"\"\n table_symbols=[]\n table_volumes=[]\n table_share_price=[]\n table_stock_name=[]\n table_total_value=[]\n\n rows=db.execute(\"SELECT stock_symbol,volume FROM portfolio WHERE id = :id\", id=current_user)\n for row in rows:\n symbol=row[\"stock_symbol\"]\n table_symbols.append(str(symbol))\n\n table_volumes.append(row[\"volume\"])\n\n lookedup=lookup(row[\"stock_symbol\"])\n table_share_price.append(lookedup.get(\"price\"))\n table_stock_name.append(lookedup.get(\"name\"))\n\n table_total_value.append(int(lookedup.get(\"price\"))*int(row[\"volume\"]))\n\n # at this point we have lists with stock_symbols, amounts, prices and stock names just need to generate the code for portfolio table\n\n # for row in table_symbols:\n # y=0\n # portfolio_table+=\"<tr><td>\"+str(table_stock_name[y])+\"</td><td>\"+str(table_symbols[y])+\"</td><td>\"+str(table_volumes[y])+\"</td><td>\"+str(table_share_price[y])+\"</td></tr>\"\n # y+=1\n # not sure if this is going to insert into index.html correctly\n\n current_cash=int(current_cash[0][\"cash\"])\n current_total_value=current_cash\n\n for i in range(len(table_volumes)):\n\n volume=int(table_volumes[i])\n price=int(table_share_price[i])\n current_total_value+= volume*price\n\n return render_template(\"index.html\", current_cash=current_cash, table_symbols=table_symbols,table_volumes=table_volumes,table_share_price=table_share_price,table_stock_name=table_stock_name, table_total_value=table_total_value,current_total_value=current_total_value)\n\n else:\n # dont think ill be posting with index\n return apology(\"Should this even exist, how did u get here?\")", "def index():\n userid = session[\"user_id\"]\n stocks = db.execute(\"SELECT symbol FROM purchase WHERE userid = :userid GROUP BY symbol\",\n userid=userid)\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=userid)\n grand_total = cash[0][\"cash\"]\n if stocks != []:\n storages = list()\n for symbol in stocks:\n stock_data = lookup(symbol[\"symbol\"])\n current_price = stock_data[\"price\"]\n stock_info = dict()\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase WHERE userid = :userid\\\n GROUP BY symbol HAVING symbol = :symbol\", userid=userid, symbol=symbol[\"symbol\"])\n current_shares = shares_info[0][\"shares_sum\"]\n if current_shares > 0:\n stock_info[\"symbol\"] = symbol[\"symbol\"]\n stock_info[\"name\"] = stock_data[\"name\"]\n stock_info[\"price\"] = usd(current_price)\n stock_info[\"shares\"] = current_shares\n total = current_price * current_shares\n grand_total += total\n stock_info[\"total\"] = usd(total)\n storages.append(stock_info)\n return render_template(\"index.html\", storages=storages, cash=usd(cash[0][\"cash\"]), grand_total=usd(grand_total))\n else:\n return render_template(\"index.html\", cash=usd(cash[0][\"cash\"]), grand_total=usd(grand_total))\n return render_template(\"index.html\")", "def display_artist_complete_portfolio(artist_name):\n if controls_utils.artist_has_work_in_db(artist_name):\n results = artwork_db.get_all_artwork_from_one_artist(artist_name)\n for piece in 
results:\n print(piece)\n else:\n print('Sorry, no artwork from this artist to display ')", "def index():\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n transactions=[]\n grand_total = 0\n for row in rows:\n stock = lookup(row[\"Symbol\"])\n transactions.append({\n \"Symbol\": stock[\"symbol\"],\n \"Name\": stock[\"name\"],\n \"Shares\": row[\"totalShares\"],\n \"Price\": usd(stock[\"price\"]),\n \"Total\": usd(stock[\"price\"] * row[\"totalShares\"])\n })\n grand_total += stock[\"price\"] * row[\"totalShares\"]\n rows = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"cash\"]\n return render_template(\"table.html\", transactions=transactions, cash=usd(cash), grand_total=usd(grand_total))", "def index():\n # query database to get cash on hand\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n\n # query database to get current holdings from transactions list\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n\n # assign names and totals for stocks\n for stock in stocks:\n stock_lookup = lookup(stock[\"symbol\"])\n stock[\"name\"] = stock_lookup[\"name\"]\n stock[\"total\"] = stock[\"shares\"] * stock_lookup[\"price\"]\n\n stocks[:] = [stock for stock in stocks if stock.get(\"shares\") > 0]\n\n totals = user_cash + sum([stock[\"total\"] for stock in stocks])\n\n return render_template(\"index.html\", user_cash=user_cash, stocks=stocks, total=totals, usd=usd)", "def sell():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = row[0]['username']\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n if not result:\n return apology('no symbol available')\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n old_shares = row[0]['shares']\n \n if shares > old_shares:\n return apology('number exceeds available shares')\n \n new_shares = old_shares - shares\n \n if new_shares == 0:\n db.execute(\"DELETE FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n \n quote = lookup(symbol)\n price = quote['price']\n total_p = price * shares\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n old_cash = row[0]['cash']\n \n new_cash = old_cash + total_p\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=new_cash, id=session['user_id'])\n \n #current_time = time.strftime(time.localtime(\"%H:%M:%S %m/%d/%Y\"))\n current_time = time.asctime( time.localtime(time.time()) )\n db.execute(\"INSERT INTO history (username, time, symbol, shares) VALUES (:username, :time, :symbol, :shares)\", 
username=username,time=current_time,symbol=symbol,shares=0-shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"sell.html\")", "def history():\n # extract history of operation for a particular user\n historical_data = db.execute(\"SELECT Symbol, Company, Shares, Price, Total, Timestamp FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", historical=historical_data)", "def analyze(request, *args, **kwargs):\n\n mode = 'lines+markers'\n\n tickers = Stock.objects.distinct(\n 'ticker').values_list('ticker', flat=True)\n tickers_dict = {ticker: [] for ticker in tickers}\n tickers_count = tickers.count()\n\n actual_dates = Stock.objects.values('date').annotate(\n dcount=Count('date')).filter(dcount=tickers_count).values_list(\n 'date', flat=True).order_by('date')\n date_list = list(actual_dates)\n\n data = Stock.objects.filter(date__in=actual_dates).order_by('date')\n\n for item in data.values('ticker', 'close', 'oopen'):\n tickers_dict[item['ticker']].append(\n round((item['close']-item['oopen'])*100/item['oopen'], 2)\n )\n\n scatters = [Scatter(x=date_list, y=tickers_dict[obj], mode=mode, name=obj,\n opacity=0.8, visible='legendonly') for obj in tickers_dict]\n figure = {'data': scatters, 'layout': {\n 'title': {\n 'text': 'Open-Closed comparision', 'y': 0.9, 'x': 0.5,\n 'xanchor': 'center','yanchor': 'top'},\n 'yaxis_title': \"Daily percent\",\n 'xaxis_title': \"Years\",\n }}\n\n return render(request, \"analyze.html\", context={\n 'plot_div': plot(figure, output_type='div')})", "async def stocks(self, ctx):\n\t\tpass", "def index():\n holdings = db.execute(\"SELECT symbol, amount FROM stocks WHERE stocks.user_id = :userid AND amount != 0\", userid = session[\"user_id\"])\n total = 0\n\n for row in holdings:\n sDict = lookup(row['symbol'])\n row['name'] = sDict['name']\n row['share_total'] = sDict['price'] * row['amount']\n total += row['share_total']\n\n row['price'] = usd(sDict['price'])\n row['share_total'] = usd(row['share_total'])\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session[\"user_id\"])\n cash = cash[0][\"cash\"]\n total += cash\n\n return render_template(\"index.html\", holdings=holdings, cash=usd(cash), total=usd(total))", "def summary(self):\n print '%s Portfolio\\'s %s Strategy' % (self.portfolio.name, self.name)\n print '-' * COL_DASH_WIDTH\n\n self.display_trades()\n\n for symbol in self.portfolio.assets.keys():\n perf = self.performance[symbol]\n\n print '\\nSummary for %s from %s (first trade) to %s (last trade)' % (symbol, perf['start'], perf['end'])\n print '.' 
* COL_DASH_WIDTH\n print 'Summary:'\n data = [[fmtn(perf['trades']), fmtn(perf['wins']), fmtn(perf['losses']), fmtn(perf['washes'])]]\n print tabulate.tabulate(data, headers=['Total Trades', '# Wins', '# Losses', '# Washes'])\n\n print '\\nPerformance:'\n data = [[\n fmtn(perf['profit']), fmtn(perf['loss']), fmtn(perf['net_profit']),\n fmtp(perf['profit_factor']), fmtp(perf['percent_profitable']), fmtn(perf['average_trade_net_profit'])\n ]]\n print tabulate.tabulate(data, headers=['Profit', 'Loss', 'Net Profit', 'Profit Factor', 'Percent Profitable', 'Average Net Profit per Trade'])\n\n print '\\nDrawdown:'\n data = [[fmtn(perf['max_drawdown']), fmtn(perf['average_drawdown']), fmtn(perf['max_drawdown_days']), fmtn(perf['average_drawdown_days'])]]\n print tabulate.tabulate(data, headers=['Max', 'Average', 'Max Days', 'Average Days'])\n\n print '\\nRisk:'\n data = [[fmtn(perf['volatility_risk']), fmtn(perf['beta']), fmtn(perf['lower_partial_moment_risk']), fmtn(perf['t_r']), fmtn(perf['s_r'])]]\n print tabulate.tabulate(data, headers=['Volatility', 'Beta', 'Lower Partial Moment', 'Treynor Ratio', 'Sharpe Ratio'])", "def index():\n inventory = db.execute(\"SELECT symbol,quantity FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n cash = float(db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])[0][\"cash\"])\n total = cash\n for i in inventory:\n stock = lookup(i[\"symbol\"])\n i[\"price\"] = stock[\"price\"]\n i[\"name\"] = stock[\"name\"]\n i[\"total\"] = usd(stock[\"price\"] * i[\"quantity\"])\n total += stock[\"price\"] * i[\"quantity\"]\n return render_template(\"index.html\", context={\"inventory\":inventory,\"total\":usd(total),\"cash\":usd(cash)})", "def action(self, history_to_date, stocks, portfolio):\n return {}", "def index():\n\n stocks_owned = db.execute(\"SELECT DISTINCT stock FROM transaction WHERE id = :id;\", id=session['user_id'])\n\n number_of_rows= len(stocks_owned) - 1\n\n i = 0\n\n total_value=0\n\n for stock in stocks_owned:\n\n stock_list=[]\n stock_list[i]=stock\n\n value = db.execute(\"SELECT SUM(total_amount) FROM transaction WHERE id = :id GROUP BY stock HAVING stock=:stock\", id=session['usestockr_id'], stock=stocks_owned[\"stock\"])\n value_list=[]\n value_list[i] = value\n\n amount_owned = db.execute(\"SELECT SUM(amount) FROM transaction WHERE id = :id GROUP BY stock HAVING stock=:stock\", id=session['user_id'], stock = stocks_owned[\"stock\"])\n amount_list=[]\n amount_list[i]= amount_owned\n\n quote_input = stocks_owned[i]\n quote_info = lookup(quote_input)\n price = quote_info['price']\n price_list=[]\n price_list[i] = price\n\n\n total_value+=value\n\n i+=1\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id;\", id=session['user_id'])\n\n grand_total = total_value + cash\n\n ###(\"SELECT stock, SUM(total_amount) FROM transaction WHERE id = :id;, id=session['user_id'] GROUP BY stock\")####\n\n\n return render_template(\"index.html\", number_of_rows=number_of_rows, stock_list=stock_list, amount_list=amount_list, value_list=value_list, price_list=price_list, total_value=total_value, grand_total=grand_total)", "def new_portfolio_flow(portfolio_list):\n while True:\n portfolio_name = prompt.shortcuts.input_dialog(\n title=\"Portfolio Name\", text=\"Please type the portfolio name:\"\n ).run()\n if portfolio_name is not None:\n portfolio_id: int = len(portfolio_list)\n stock_list = []\n stock_list = add_stock_flow(stock_list)\n portfolio_list.append(Portfolio(portfolio_name, portfolio_id, stock_list))\n 
return portfolio_list\n if portfolio_name is None:\n return None", "def history():\n rows = db.execute(\"SELECT stock_id, stocks.symbol, price, shares, date FROM history JOIN stocks ON history.stock_id=stocks.id WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def index():\n #if request.method == \"GET\":\n #Выбрать знак акции,и кол-во акции которые пренадлежат id\n #stocks_shares = db.execute(\"SELECT symbol, shares FROM total WHERE id=:id ORDER BY symbol\",\n #id=session[\"user_id\"])\n #return render_template(\"index.html\")\n #return redirect(url_for(\"index.html\"))\n return apology(\"TODO\")", "def select_stock_object(portfolio_list, portfolio_selected):\n portfolio_stock_choice = prompt.shortcuts.radiolist_dialog(\n values=[(x, x.name) for x in\n [x.stock_list for x in portfolio_list if x.portfolio_id == portfolio_selected][0]],\n title=\"Portfolio Overview\",\n text=\"Please select a stock:\",\n ).run()\n return portfolio_stock_choice", "def history():\n\n rows = db.execute('SELECT operation, symbol, shares, price, date FROM transactions WHERE id = :id',\n id=session['user_id'])\n\n return render_template('history.html', stocks=rows[::-1])", "def history():\n if request.method == \"GET\":\n \n user_id = int(session.get('user_id'))\n user_data = db.execute('''SELECT * FROM history WHERE user_id = :user_id''', user_id = user_id)\n \n if not user_data:\n return render_template('quote.html')\n \n #create lists of values for sake of returning them to F2E\n portfolio = []\n \n for i in user_data:\n #getting data from table\n date = i.get('date')\n symbol = i.get('symbol')\n name = i.get('stock_name')\n quantity = i.get('quantity')\n price = round(float(i.get('price')), 2)\n action = str(i.get('deal'))\n \n #inserting data into a list\n a_dict = {\n 'date': date, 'symbol': symbol, \n 'name': name, 'price': price, \n 'quantity': quantity, 'action': action\n }\n portfolio.append(a_dict)\n \n return render_template('history.html',\n portfolio=portfolio)\n else:\n return render_template('index.html')", "async def stock(self, ctx, ticker: str):\n symbols = await self.bot.aiojson(\"https://api.robinhood.com/quotes/\"\\\n f\"?symbols={ticker.upper()}\")\n if not symbols:\n await ctx.send(\"Stock not found. 
This stock is probably not tradeable on robinhood.\")\n return\n symbols_result = symbols[\"results\"][0]\n instrument = await self.bot.aiojson(symbols_result[\"instrument\"])\n fundamentals = await self.bot.aiojson(\n f\"https://api.robinhood.com/fundamentals/{ticker.upper()}/\")\n\n current_price = (symbols_result[\"last_trade_price\"] if\n \"last_extended_hours_trade_price\" in symbols_result\n else symbols_result[\"last_extended_hours_trade_price\"])\n diff = Decimal(Decimal(current_price) -\n Decimal(symbols_result[\"previous_close\"]))\n percentage = str(100 * diff / Decimal(current_price))[:6]\n\n if not percentage.startswith(\"-\"):\n percentage = \"+\" + percentage\n\n current_price_string = self.format_currency(current_price)\n diff_string = self.format_currency(diff)\n bid_price_string = self.format_currency(Decimal(symbols_result[\"bid_price\"]))\n ask_price_string = self.format_currency(Decimal(symbols_result[\"ask_price\"]))\n tradeable_string = (\n \":white_check_mark:\" if instrument[\"tradeable\"] else \":x:\")\n\n update_timestamp = parser.parse(symbols_result[\"updated_at\"])\n\n symbol = symbols_result[\"symbol\"]\n change_color = await self.get_stock_change_color(symbol)\n\n embed = discord.Embed(title=f\"{symbol}'s stocks info\",\n color=change_color,\n timestamp=update_timestamp)\n\n embed.add_field(name=\"Name\", value=instrument[\"name\"])\n embed.add_field(name=\"Current Price\", value=current_price_string)\n embed.add_field(name=\"Change from yesterday\", value=f\"{diff_string} ({percentage}%)\")\n embed.add_field(name=\"Bid size\", value=f\"{symbols_result['bid_size']} ({bid_price_string})\")\n embed.add_field(name=\"Ask size\", value=f\"{symbols_result['ask_size']} ({ask_price_string})\")\n embed.add_field(name=\"Current Volume\", value=fundamentals[\"volume\"])\n embed.add_field(name=\"Average Volume\", value=fundamentals[\"average_volume\"])\n embed.add_field(name=\"Tradeable on Robinhood\", value=tradeable_string)\n embed.add_field(name=\"Country\", value=f\":flag_{instrument['country'].lower()}:\")\n\n await ctx.send(embed=embed)", "def sell():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n \n # initiate variables\n shares = int(request.form.get(\"shares\"))\n stocks = []\n \n # obtain user's stock information from portfolio database\n stocks = db.execute(\"SELECT shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n # check that user actually owns enough stock, or any stock at all\n if stocks == []:\n return apology(\"you don't own any of this stock\")\n if shares > stocks[0][\"shares\"]:\n return apology(\"invalid number of shares\")\n \n # calculate price per share and cost of all shares\n price = round(float(quote[\"price\"]),2)\n cost = round(float(shares * price),2)\n \n # update user's cash balance\n db.execute(\"UPDATE users SET cash = cash + :cost WHERE id = :id\", cost = cost, id=session[\"user_id\"])\n \n # if there are still shares leftover after sale, update row\n if shares < stocks[0][\"shares\"]:\n 
db.execute(\"UPDATE portfolio SET shares = shares - :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], shares = shares, symbol = quote[\"symbol\"])\n \n # otherwise, if not shares leftover, remove row from portfolio entirely\n elif shares == stocks[0][\"shares\"]:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=-shares,price=price)\n \n flash('Sold!')\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET\n else:\n return render_template(\"sell.html\")", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def get_portfolio_pnl(self):\n\n return self._portfolio", "def stock_value_history(stock_values, value_name='Close'):\n ticker = stock_values.name\n dates = stock_values.index\n \n # hover tool\n phover = HoverTool(tooltips=[(\"price\", \"$y\"),])\n\n # plot\n p = figure(x_axis_type = \"datetime\", tools=[\"pan,wheel_zoom,box_zoom,reset,resize\", phover])\n\n p.title = \"{} Closing Prices\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Price (US$)'\n p.yaxis.axis_label_text_font_size = '9'\n\n line1_name = value_name\n p.line(np.array(dates, 'M64'), stock_values[value_name], legend=value_name,\n color='#182b8b', **line_style)\n\n line1_name = 'SMA 30'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line1_name], legend=line1_name,\n color='#5477a0', **line_style)\n\n line2_name = 'SMA 100'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line2_name], legend=line2_name,\n color='#dfbd4d', **line_style)\n\n line3_name = 'SMA 300'\n p.line(np.array(stock_values.index, 'M64'), stock_values[line3_name], legend=line3_name,\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 300\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n # legend\n p.legend.orientation = \"bottom_left\"\n p.legend.label_text_font_size = '3'\n \n return p", "def display_stock(stock):\n print(\"Stock contains:-\")\n for typ in stock:\n print(typ + f\" {stock[typ]}\")", "def evaluate_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n date = request.args.get('date')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if not util.is_valid_date_string(date):\n return util.build_json_response(\"Not a valid date of the form YYYY-MM-DD\")\n\n following_date = util.add_days_to_date(date, 1)\n equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).all()\n\n e_total = 0\n for equity in equities:\n price = equity[1] * market_data.get_stock_price(equity[0], date, 'low')\n e_total += price\n\n total = round(e_total + user_obj.balance, 2)\n cash = round(user_obj.balance, 2)\n e_total = round(e_total, 2)\n\n return 
util.build_json_response(\"Portfolio totals retrieved\", equity_total=e_total, cash_balance=cash, account_total=total)", "def initialize_portfolio(self):\n\n raise NotImplementedError('''\n Must implement initialize_portfolio. Call help() for details.\n ''')", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def index_view(request):\n\n\t# Create blank form instances.\n\tform = TickerForm()\n\tcrypto_form = CryptoTickerForm()\n\t\n\t# Check if the request method == POST\n\tif request.method == 'POST':\n\t\tpost_data = request.POST or None\n\t\t# Check that ther is data on the request.\n\t\tif post_data != None:\n\t\t\t# Check if the user enters data and the stock ticker form.\n\t\t\tif request.POST.get(\"form_type\") == 'stock_form':\n\t\t\t\tform = TickerForm(request.POST)\n\t\t\t\t# Check if form is valid.\n\t\t\t\tif form.is_valid():\n\t\t\t\t\t# Get the 'ticker' value from the form and store it the ticker variable.\n\t\t\t\t\tticker = form.cleaned_data.get('ticker')\n\t\t\t\t\t# If the variable ticker exists in the users portfolio send error message.\n\t\t\t\t\ttry: \n\t\t\t\t\t\tif request.user.stocks_set.get(ticker=ticker) != None:\n\t\t\t\t\t\t\tmessages.info(request, 'Stock ticker already exists in portfolio.')\n\t\t\t\t\t# Create the Stock Object in the database and link it to the current user.\n\t\t\t\t\texcept Stocks.DoesNotExist:\n\t\t\t\t\t\tStocks.objects.create(\n\t\t\t\t\t\t\tticker = ticker, \n\t\t\t\t\t\t\tuser=request.user)\n\t\t\t\t\t\t# Get the stock that was created from the database.\n\t\t\t\t\t\tcurrent_stock = Stocks.objects.get(ticker=ticker, user=request.user)\n\t\t\t\t\t\t# Get the meta and price data\n\t\t\t\t\t\tcurrent_stock_meta_dict = current_stock.get_meta_data()\n\t\t\t\t\t\tcurrent_stock_price_dict = current_stock.get_price_data()\n\t\t\t\t\t\t# Add the highest price for the stock to the meta data dict\n\t\t\t\t\t\tcurrent_stock_meta_dict['high'] = current_stock_price_dict.get('high')\n\t\t\t\t\t\t# Add a ticker variable to meta data incase user enters incorrect ticker and there is no data.\n\t\t\t\t\t\tcurrent_stock_meta_dict['ticker'] = current_stock.ticker\n\t\t\t\t\t\t# Add the meta and price data to the current session\n\t\t\t\t\t\trequest.session['meta_data'][current_stock.ticker] = current_stock_meta_dict\n\t\t\t\t\t\trequest.session['price_data'][current_stock.ticker] = current_stock_price_dict\n\t\t\t\t\t\t# Explicitly save the session\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t# Reset the form instance.\n\t\t\t\t\t\tform = TickerForm()\n\n\n\t\t\t# Check wether the user enters data on the crypto currency ticker form.\n\t\t\telif request.POST.get(\"form_type\") == 'crypto_form':\n\t\t\t\tcrypto_form = CryptoTickerForm(request.POST)\n\t\t\t\tif crypto_form.is_valid():\n\t\t\t\t\tcrypto_ticker = request.POST['crypto_ticker']\n\t\t\t\t\t# If the variable crypto_ticker exists in the users portfolio send error message.\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif request.user.crypto_set.get(crypto_ticker=crypto_ticker) != None:\n\t\t\t\t\t\t\tmessages.info(request, 'Crypto ticker already exists in portfolio.')\n\t\t\t\t\t# Else create the Crypto Object in the database and link it to the current user.\n\t\t\t\t\texcept Crypto.DoesNotExist:\n\t\t\t\t\t\tCrypto.objects.create(\n\t\t\t\t\t\t\tcrypto_ticker = crypto_ticker, \n\t\t\t\t\t\t\tuser=request.user)\n\t\t\t\t\t\t# Get the currently created cryptocurrency ticker\n\t\t\t\t\t\tcurrent_crypto = Crypto.objects.get(crypto_ticker = crypto_ticker, user = 
request.user)\n\t\t\t\t\t\t# Get the meta data and price data for the current cryptocurrency\n\t\t\t\t\t\tcurrent_crypto_meta_dict = current_crypto.get_crypto_meta_data()\n\t\t\t\t\t\tcurrent_crypto_price_dict = current_crypto.get_crypto_price_data()\n\t\t\t\t\t\t# Add a crypto_ticker variable to meta data incase user enters incorrect ticker and there is no data.\n\t\t\t\t\t\tcurrent_crypto_meta_dict['crypto_ticker'] = current_crypto.crypto_ticker\n\t\t\t\t\t\t# Handle Error for no data on creation of invalid cryptocurrency object\n\t\t\t\t\t\tif len(current_crypto_price_dict) == 0:\n\t\t\t\t\t\t\tcurrent_crypto_price_dict.append({'topOfBookData':[{'lastPrice':'No_Data'}]})\n\n\t\t\t\t\t\t# Add the meta data and price data to the current session\n\t\t\t\t\t\trequest.session['crypto_meta_data'][current_crypto.crypto_ticker] = current_crypto_meta_dict\n\t\t\t\t\t\trequest.session['crypto_price_data_dict'][current_crypto.crypto_ticker] = current_crypto_price_dict\n\t\t\t\t\t\t# Save the session\n\t\t\t\t\t\trequest.session.modified = True\n\t\t\t\t\t\t# Reset the crypto_form\n\t\t\t\t\t\tcrypto_form = CryptoTickerForm()\n\t\t\t\t\t\n\n\n\t#Call a list of the users stocks and store it to be passed into the context.\n\tstock_list = request.user.stocks_set.all()\n\tcrypto_list = request.user.crypto_set.all()\n\n\t# Initialse dictionaries to store meta data and price data.\n\tstock_metadata_dict = {}\n\tstock_price_data_dict = {}\n\n\tcrypto_metadata_dict = {}\n\tcrypto_price_data_dict = {}\n\n\t# Loop through users stock and crypto portfolios and add meta and price data to respective dictionaries. \n\n\t# Only do this the first time the user logs into the site.\n\tif request.session.get('meta_data') == None:\n\t\tfor stock in stock_list:\n\t\t\tstock_metadata_dict[stock.ticker] = stock.get_meta_data()\n\t\t\tstock_price_data_dict[stock.ticker] = stock.get_price_data()\n\t\t\t# Add stocks highest price data to meta data dict for use on index page.\n\t\t\tstock_metadata_dict[stock.ticker]['high'] = stock_price_data_dict[stock.ticker].get('high')\n\t\t\t# Add a ticker to metadata dict incase user enters incorrect ticker and there is no data returned.\n\t\t\tstock_metadata_dict[stock.ticker]['ticker'] = stock.ticker\n\n\t\tfor crypto in crypto_list:\n\t\t\tcrypto_metadata_dict[crypto.crypto_ticker] = crypto.get_crypto_meta_data()\n\t\t\tcrypto_price_data_dict[crypto.crypto_ticker] = crypto.get_crypto_price_data()\n\t\t\t# Add a crypto_ticker to metadata dict incase user enters incorrect ticker and there is no data returned.\n\t\t\tcrypto_metadata_dict[crypto.crypto_ticker]['crypto_ticker'] = crypto.crypto_ticker\n\t\t\t# Handle error when there is no data recieved for an incorrect ticker.\n\t\t\tif len(crypto_price_data_dict[crypto.crypto_ticker]) == 0:\n\t\t\t\tcrypto_price_data_dict[crypto.crypto_ticker] = [{'topOfBookData':[{'lastPrice':'No Data'}]}]\n\t\n\t\t# Set session variables for meta and price data to be used throughout site.\n\t\trequest.session['meta_data'] = stock_metadata_dict\n\t\trequest.session['price_data'] = stock_price_data_dict\n\n\t\trequest.session['crypto_meta_data'] = crypto_metadata_dict\n\t\trequest.session['crypto_price_data_dict'] = crypto_price_data_dict\n\t\n\tcontext = {\n\t\t'form' : form,\n\t\t'crypto_form' : crypto_form,\n\t}\n\n\treturn render(request, 'index.html', context)", "def sell():\n if request.method == \"POST\":\n\n #test for selection of stocks\n if request.form.get(\"symbol\") == \"\" or request.form.get(\"shares\") == \"\":\n return apology(\"Please 
fill in all fields\")\n\n #test for positive integer\n if str.isdigit(request.form.get(\"shares\")) == False:\n return apology(\"Please select a positive number of shares\")\n\n # does the user have enough shares of that stock\n user_stock = request.form.get(\"symbol\")\n user_number = int(request.form.get(\"shares\"))\n owned = db.execute(\"SELECT SUM(number) FROM portfolio WHERE userid=:id AND stock=:stock\", stock = user_stock, id=session[\"user_id\"])\n owned = int(owned[0]['SUM(number)'])\n if user_number > owned:\n return apology(\"You don't have enough shares\")\n\n #in the portfolio table, add a negative to the number field of the purchased stock\n #in the cash table, lookup the current price and add the cash to the user's cash balanace\n else:\n pay = lookup(request.form.get(\"symbol\"))\n user_number = int(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash=cash+:total WHERE id=:userid\", total=(pay['price'] * user_number), userid=session[\"user_id\"])\n\n user_number = int(request.form.get(\"shares\")) * -1\n db.execute(\"INSERT INTO portfolio (stock, number, price, trans_price, userid) VALUES (:stock, :number, :price, :trans_price, :userid)\", stock=user_stock, number=user_number, price=(pay['price'] * user_number), trans_price=usd(pay['price']), userid=session[\"user_id\"])\n\n user_id=session[\"user_id\"]\n return redirect(url_for('index'))\n\n if request.method == \"GET\":\n #get stocks from portfolio and return to html form\n stocks = db.execute(\"SELECT stock FROM portfolio WHERE userid=:id GROUP BY stock\", id=session[\"user_id\"])\n return render_template(\"sell.html\", stocks=stocks)", "def plot_stock_prices(self, ins_id):\n # creating api-object\n # using api-object to get stock prices from API\n stock_prices = self._borsdata_api.get_instrument_stock_prices(ins_id)\n # calculating/creating a new column named 'sma50' in the table and\n # assigning the 50 day rolling mean to it\n stock_prices['sma50'] = stock_prices['close'].rolling(window=50).mean()\n # filtering out data after 2015 for plot\n filtered_data = stock_prices[stock_prices.index > dt.datetime(2015, 1, 1)]\n # plotting 'close' (with 'date' as index)\n plt.plot(filtered_data['close'], color='blue', label='close')\n # plotting 'sma50' (with 'date' as index)\n plt.plot(filtered_data['sma50'], color='black', label='sma50')\n # show legend\n plt.legend()\n # show plot\n plt.show()", "def simulate_trading(self):\n self._generate_trading_instances()\n self._run_backtest()\n self.portfolio.output_equity()\n res=self.portfolio.get_statistics()\n self.plot.plot_equity()\n return res", "def index():\n\n # Create lists containing values for the table\n symbols = []\n names = []\n shares = []\n totals = []\n prices = []\n\n # Query database for the current amount of cash and stocks\n cash = db.execute(\"SELECT cash FROM users WHERE id = :username\", username=session[\"user_id\"] )[0][\"cash\"]\n stocks = db.execute(\"SELECT * FROM summary WHERE id = :username\", username=session[\"user_id\"] )\n grand = cash\n\n # Append to the lists from the database\n for item in stocks:\n symbol = item[\"symbol\"]\n symbols.append(symbol)\n names.append(lookup(symbol)[\"name\"])\n share = db.execute(\"SELECT shares FROM summary WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=symbol)[0][\"shares\"]\n shares.append(share)\n prices.append(lookup(symbol)[\"price\"])\n total = int(share) * lookup(symbol)[\"price\"]\n totals.append(total)\n grand += total\n\n # Obtain list length\n length = 
len(symbols)\n\n # Direct users to the index page\n return render_template(\"index.html\", symbols = symbols, length = length, cash=cash, names = names, shares = shares, totals = totals, prices = prices, grand = grand)", "def index(request):\n # Get biggest movers\n stock_mover = top_movers()\n\n # Get latest data\n stock_mover_quotes = {}\n for stock in stock_mover:\n all_of_quote = get_current_quote(stock.ticker)\n # Get jUut the fields you need from the result\n stock_mover_quotes[stock.ticker] = {\n k: all_of_quote.get(k, None) for k in ('Symbol', 'Name', 'Bid', 'Change', 'PercentChange')}\n\n # XXX messages should be a list of messages of the biggest movers\n messages = list(Message.objects.filter(source=\"twitter\"))[:33]\n messages += list(Message.objects.filter(source=\"stocktwits\"))[:33]\n messages += list(Message.objects.filter(source=\"reddit\"))[:33]\n random.shuffle(messages)\n\n return render(\n request,\n 'index.html',\n {\"streamer\": messages, \"stock_list\": stock_mover_quotes.values()}\n )", "def layout(symbols):\n periods = [\n ('1 day', 0),\n ('1 week', 1),\n ('1 month', 2),\n ('3 months', 3),\n ('1 year', 4),\n ('5 years', 5)\n ]\n return Div([\n H3('Stock prices'),\n Div([\n Div([_symbol_selector_dropdown(symbols)],\n style={\n 'width': '45%',\n 'float': 'left',\n 'display': 'inline-block'\n }),\n Div([_period_selector_radio(periods)],\n style={\n 'width': '45%',\n 'float': 'right',\n 'display': 'inline-block'\n })\n ], style={'display': 'inline-block', 'width': '100%'}),\n Graph(\n id='plot-stock',\n config={'displayModeBar': False}\n )\n ])", "def index():\n\n rows = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n print(\"rows= \" , rows)\n\n cash = rows[0] [\"cash\"]\n\n stocks = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n print(\"Stocks= \", stocks)\n\n holdings = 0\n for stock in stocks:\n print(stock[\"stock_code\"])\n stockDetail = lookup(stock[\"stock_code\"])\n print(\"StockDetail: \", stockDetail)\n stock_name = stockDetail[\"name\"]\n print(\"Stock Name: \", stock_name)\n\n if stockDetail == None:\n return apology(\"Not able to determine stock value\", 403)\n\n else:\n stockPrice = stockDetail[\"price\"]\n print(\"price of stock\", stockPrice)\n stock_name = stockDetail[\"name\"]\n # total value of each stock the user owns\n stock_value = stock[\"stock_quantity\"] * stockPrice\n holdings = holdings + stock_value\n stock[\"stock_name\"] = stock_name\n stock[\"stock_price\"] = usd(stockPrice)\n stock[\"stock_value\"] = usd(stock_value)\n print(\"Total value of each stock: \", stock_value)\n\n return render_template(\"index.html\", stocks=stocks,cash=usd(cash),total=usd(holdings+cash))", "def get_portfolio_object(self):\n return self.__get_portfolio_object(self.portfolio_name, self.portfolio_user)", "def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()", "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)", "def sell():\n rows = db.execute(\"SELECT stock_id, shares, 
stocks.symbol FROM portfolio JOIN stocks ON portfolio.stock_id = stocks.id WHERE user_id==:user_id\", user_id=session[\"user_id\"])\n if request.method==\"GET\":\n return render_template(\"sell.html\", rows=rows)\n else:\n symbol = request.form.get(\"symbol\")\n if symbol==\"None\":\n return apology(\"You must select a symbol\")\n # shares sold will be stored in history table with negative value\n shares = int(request.form.get(\"shares\"))*(-1)\n if abs(shares) > rows[0][\"shares\"]:\n return apology(\"You don't own enough shares\")\n # run lookup function\n dict_4 = lookup(symbol)\n price = dict_4[\"price\"]\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], price=price, shares=shares, buy=0)\n # UPDATE shares in 'portfolio' table\n new_shares = (rows[0][\"shares\"])+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], shares=new_shares)\n # Update cash in 'users' table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"]-(price*shares)\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user sells stock\n flash('Sold!')\n return redirect(\"/\")", "def history():\n \n # selection of name, symbol, shares and cash of user stocks\n hist = db.execute(\"SELECT * FROM history WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"history.html\", hist=hist)", "def sell():\n\n if request.method == \"GET\":\n\n #Query for all the stocks in posession.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY name\", id=session[\"user_id\"])\n\n return render_template(\"sell.html\", ports=ports)\n if request.method == \"POST\":\n #Access the form data\n symbol = request.form.get(\"symbol\")\n\n #Check if the shares was an integer\n try:\n shares = int(request.form.get(\"shares\"))\n except:\n return apology (\"Please enter a whole number\", 400)\n\n #Query for the total quantity of that stock in posession\n get_quantity = db.execute(\"SELECT quantity FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n #Convert the quantity dict to int\n get_quantity_int = int(get_quantity[0]['quantity'])\n\n #Check if the user input a positive number.\n if shares < 0:\n return apology (\"Please enter a positive value\", 403)\n\n #Get the current date and time\n now = datetime.now()\n\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n if shares < 0:\n return apology (\"Please enter a positive value\", 403)\n #Lookup the stock symbol data (price, symbol, company name)\n if shares > get_quantity_int:\n return apology (\"Selling more than you own?\", 400)\n stock = lookup(symbol)\n\n stock_price = stock['price']\n\n #Created a new table using CREATE TABLE 'portfolio' ('user' text, 'quantity' integer, 'price' numeric(15, 2), 'symbol' text)\n\n #Get the total cash value of the user from the database\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float\n check_cash = float(get_cash[0]['cash'])\n\n if not stock:\n return apology (\"Please enter a valid stock\", 
403)\n\n #Compute the total amount of the shares sold (One company stock only)\n total = stock_price * float(shares)\n\n #Update the total amount of cash in hand by adding the sold stocks.\n db.execute(\"UPDATE users SET cash = cash + :total WHERE id = :id\", id=session[\"user_id\"], total=total)\n\n #Check if the total quantity of shares is equal to the quantity the user is trying to sell.\n #Add the stock in the history table\n history = db.execute(\"INSERT INTO history (symbol, quantity, price, transacted, id) VALUES (?, ?, ?, ?, ?)\", symbol, int(shares) * -1, float(stock_price), date_time, session[\"user_id\"] )\n\n #If it's equal then delete the stock in the portfolio. #Else, Update the quantity of that stock in the portfolio.\n if shares == get_quantity_int:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n flash('You successfully sold the stock!')\n else:\n db.execute(\"UPDATE portfolio SET quantity = quantity - :shares, total = total -:total WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol=symbol, shares=shares, total=total)\n flash('You successfully sold the stock!')\n return redirect (url_for('index'))", "def _fetch_stock_page(*markets) -> bs4.BeautifulSoup:\n\n if len(markets) == 0:\n raise ValueError('No markets given')\n\n params = {\n 'Exchange' : 'NMF',\n 'SubSystem': 'Prices',\n 'Action' : 'GetMarket',\n 'app' : '/osakkeet',\n 'Market' : ','.join([x.value for x in markets]),\n # 'ext_xslt': '/nordicV3/inst_table_shares.xsl'\n }\n\n r = requests.get(_API_URL, params)\n response_text = r.text\n soup = bs4.BeautifulSoup(response_text, 'lxml')\n\n return soup", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must select a stock\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif float(request.form.get(\"shares\")) <= 0:\n return apology(\"number of shares must be greater than one\", 400)\n elif float(request.form.get(\"shares\")) > db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"]:\n return apology(\"you don't own enough shares\", 400)\n\n numberOfShares = float(request.form.get(\"shares\"))\n\n priceOfEachShare = db.execute(\"SELECT price FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"price\"]\n\n totalValue = numberOfShares * priceOfEachShare\n\n db.execute(\"UPDATE users SET cash = cash + {0} WHERE id=:userId\".format(totalValue), userId=session[\"user_id\"])\n\n db.execute(\"UPDATE portfolio SET number = number - {0} WHERE username=:username AND symbol=:symbol\".format(request.form.get(\"shares\")),\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n if db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"] == 0:\n db.execute(\"DELETE FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"), buyorsell=0, number=float(request.form.get(\"shares\")),\n price=priceOfEachShare, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n\n else:\n symbolsList = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n return render_template(\"sell.html\", stocks=symbolsList)", "def get_stock_data_frame(time, stock):\n\n print(\"Getting\", time, \"stock data for\", stock)\n url = 'https://api.iextrading.com/1.0/stock/'+stock+'/chart/'+time\n req = requests.get(url)\n print(url)\n\n print(\"Parsing data.\")\n rjson = req.text\n\n rdata = json.loads(rjson)\n\n dates = []\n openprices = []\n highprices = []\n lowprices = []\n closeprices = []\n volumes = []\n\n for i in rdata:\n date = i['date']\n dates.append(date)\n openprices.append(float(i['open']))\n highprices.append(float(i['high']))\n lowprices.append(float(i['low']))\n closeprices.append(float(i['close']))\n volumes.append(float(i['volume']))\n\n index = pd.DatetimeIndex(dates, dtype='datetime64[ns]')\n _open = pd.Series(openprices, index=index)\n 
high = pd.Series(highprices, index=index)\n low = pd.Series(lowprices, index=index)\n close = pd.Series(closeprices, index=index)\n data_frame_data = {'Open' : _open, 'High' : high, 'Low' : low, 'Close' : close}\n\n return pd.DataFrame(data_frame_data)", "def stocks_history(request):\n\n symbol = request.args.get('symbol')\n\n if symbol is None:\n return jsonify([])\n\n client = bigquery.Client()\n qry = client.query(\"\"\"\n SELECT \n date,\n adj_close,\n symbol,\n sma_20,\n std_20,\n sma_50,\n sma_200,\n bb_perc_20\n FROM `ticker-224822.ticker_test_120718.analytics_view`\n where \n symbol = '{symbol}'\n and extract(year from date) >= 2010\n \"\"\".format(symbol=symbol))\n\n results = qry.result()\n results = [dict(row.items()) for row in results]\n resp = custom_jsonify(results)\n resp.headers.add('Access-Control-Allow-Origin', '*')\n resp.headers.add('Access-Control-Allow-Methods', 'GET')\n return resp" ]
[ "0.7349525", "0.7257509", "0.7092902", "0.70840114", "0.70700926", "0.70322937", "0.70166755", "0.6955458", "0.69515324", "0.6818638", "0.67683095", "0.6739686", "0.66745", "0.66665316", "0.6584005", "0.65794265", "0.6577333", "0.6556571", "0.65012944", "0.64815927", "0.64735025", "0.6465535", "0.6312532", "0.63112044", "0.62762743", "0.62346", "0.62303627", "0.6182704", "0.61450326", "0.6122251", "0.611445", "0.61025023", "0.6082663", "0.607333", "0.606219", "0.60384244", "0.6035732", "0.60054773", "0.5978382", "0.59634656", "0.5923448", "0.591538", "0.5908193", "0.58903754", "0.58614", "0.58470833", "0.582957", "0.58252543", "0.5823499", "0.58229727", "0.58187795", "0.5812596", "0.58111256", "0.5809224", "0.5803079", "0.5794168", "0.5789423", "0.5779193", "0.5767472", "0.5761658", "0.5754713", "0.5751872", "0.5741388", "0.5727221", "0.5722986", "0.57177275", "0.571762", "0.57147825", "0.57071006", "0.5668939", "0.5649701", "0.56434345", "0.5641363", "0.5619027", "0.5619027", "0.5618465", "0.56122184", "0.5611063", "0.55942863", "0.55919564", "0.5583445", "0.5568468", "0.556346", "0.555581", "0.55501395", "0.55491537", "0.55472136", "0.55404145", "0.55395675", "0.5537525", "0.5537442", "0.55305004", "0.5529658", "0.5527494", "0.552352", "0.5509948", "0.5504186", "0.55028063", "0.54978365", "0.5485333" ]
0.67541313
11
Buy shares of stock
def buy():
    if request.method == 'GET':
        return render_template('buy.html')
    elif request.method == 'POST':
        try:
            shares = int(request.form.get('shares'))
        except:
            return apology('Quantidade de ações não inteira')
        if shares < 0:
            return apology('Quantidade de ações não positiva')
        elif not lookup(request.form.get('symbol')):
            return apology('Código de ação inválido')
        stock_symbol = request.form.get('symbol')
        price = lookup(stock_symbol)['price']
        total_purchase_cost = round((price * shares), 2)
        user_id = session.get('user_id')
        user_money = db.execute('SELECT dinheiro FROM users WHERE id = ?', user_id)[0]['dinheiro']
        if total_purchase_cost > user_money:
            return apology("Dinheiro insuficiente")
        table_name = f'stocks_user{user_id}'
        db.execute("CREATE TABLE IF NOT EXISTS ? (stock_symbol TEXT NOT NULL, shares NUMBER NOT NULL, price NUMBER NOT NULL, time TEXT NOT NULL)", table_name)
        db.execute("INSERT INTO ? (stock_symbol, shares, price, time) VALUES(?, ?, ?, ?)", table_name, stock_symbol, shares, price, time_date())
        db.execute("UPDATE users SET dinheiro = ? WHERE id = ?", (user_money - total_purchase_cost), user_id)
        return redirect('/')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def buy(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot buy less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\ttry:\n\t\t\tbal = await bank.withdraw_credits(ctx.author, shares * price)\n\t\texcept ValueError:\n\t\t\tbal = await bank.get_balance(ctx.author)\n\t\t\tawait ctx.send(\n\t\t\t\tf'You cannot afford {shares} share{plural} of {name}. '\n\t\t\t\tf'It would cost {price * shares} {currency} ({price} {currency} each). '\n\t\t\t\tf'You only have {bal} {currency}.'\n\t\t\t)\n\t\t\treturn\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name in user_stocks:\n\t\t\t\tuser_stocks[name]['count'] += shares\n\t\t\telse:\n\t\t\t\tuser_stocks[name] = {'count': shares, 'total_count': stock_data[name]['total_count']}\n\t\tawait ctx.send(\n\t\t\tf'You purchased {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def buy(self, stock, amount):\n self.orders[stock] += amount", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n \n # Creates dict\n symbol_info = lookup(request.form.get(\"symbol\"))\n \n # Checks that symbol exists\n if symbol_info == None:\n return apology(\"Invalid Symbol\", 403)\n \n # Ensure number of shares was submitted\n if not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 403)\n \n # Ensure shares is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough money to buy share\n user_money = db.execute(\"SELECT cash FROM users WHERE id=:userid\", userid=session[\"user_id\"])\n cash = float(user_money[0][\"cash\"])\n if cash < float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")):\n return apology(\"Not enough money\", 403)\n \n # Update user\n updated_money = cash - (float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")))\n db.execute(\"UPDATE users SET cash = :updated WHERE id=:usid\", updated=updated_money, usid=session[\"user_id\"])\n \n # Update shares table\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id = :usid\", 
usid=session[\"user_id\"])\n exist = 0\n for i in range(len(symbol_dicts)):\n if symbol_dicts[i][\"share\"].upper() == request.form.get(\"symbol\").upper():\n exist = 1\n break\n \n if exist == 0:\n db.execute(\"INSERT INTO shares (user_id, share, share_count) VALUES (:usid, :symbol, :count)\", usid=session[\"user_id\"], symbol=request.form.get(\"symbol\").upper(), count=int(request.form.get(\"shares\")))\n else:\n db.execute(\"UPDATE shares SET share_count = share_count + :count WHERE share = :symbol AND user_id = :usid\", count=int(request.form.get(\"shares\")), symbol=request.form.get(\"symbol\").upper(), usid=session[\"user_id\"])\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol_info[\"symbol\"], shares=request.form.get(\"shares\"), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(symbol_info[\"price\"]))\n \n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n # ensure proper symbol\n stock = lookup(request.form.get(\"symbol\"))\n if not stock:\n return apology(\"Invalid Symbol\")\n\n # ensure that user requests for correct number of shares\n try:\n shares = int(request.form.get(\"shares\"))\n if shares < 0:\n return apology(\"Amount of shares must be greater than 0\")\n except:\n return apology(\"Amount of shares must be greater than 0\")\n\n # Retrieve the cash a user has\n dollars = db.execute(\"SELECT cash FROM users WHERE id = :id\", \\\n id=session[\"user_id\"])\n\n # check if enough cash to buy\n if not dollars or float(dollars[0][\"cash\"]) < stock[\"price\"] * shares:\n return apology(\"You cannot buy shares! 
Please add more cash\")\n\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M\")\n\n\n # update history of shares bought\n db.execute(\"INSERT INTO history (symbol, shares, price, id, method, times, totaltrans) \\\n VALUES(:symbol, :shares, :price, :id, :method, :times, :totaltrans)\", \\\n symbol=stock[\"symbol\"], shares=shares, \\\n price=usd(stock[\"price\"]), id=session[\"user_id\"], method = \"Buy\", times= date_time, totaltrans = shares * stock[\"price\"] )\n\n # update user cash\n db.execute(\"UPDATE users SET cash = cash - :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n\n # Select user shares of that symbol\n user_shares = db.execute(\"SELECT shares FROM portfolio \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n\n # if user doesn't has shares of that symbol, create new stock object\n if not user_shares:\n db.execute(\"INSERT INTO portfolio (id, name, shares, symbol, price, total) \\\n VALUES(:id, :name, :shares, :symbol, :price, :total)\", \\\n id=session[\"user_id\"] , name=stock[\"name\"], \\\n shares=shares, symbol=stock[\"symbol\"], price=usd(stock[\"price\"]), \\\n total=usd(shares * stock[\"price\"]))\n\n # Else increment the shares count\n else:\n shares_total = user_shares[0][\"shares\"] + shares\n db.execute(\"UPDATE portfolio SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=shares_total, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n\n # return to index\n return redirect(url_for(\"index\"))", "def buy():\n \n user_id = session[\"user_id\"]\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n \n if request.method == \"POST\":\n \n # get required symbol\n symbol = request.form.get(\"symbol\").upper()\n try:\n qty = int(request.form.get(\"qty\"))\n except ValueError:\n return apology(\"QTY is empty!\", 400)\n \n # proceed buy function\n buy_result: Tuple[float, str] = buy_share(db, user_id, symbol, qty )\n if buy_result[0] == -1:\n return apology(buy_result[1], 400)\n\n return redirect(\"/\", 200)", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n # handle fractional, negative, and non-numeric share number\n if not symbol or lookup(symbol) == None:\n return apology(\"must provide valid symbol and share number\", 400)\n elif shares.isdigit() == False or int(shares) <= 0:\n return apology(\"must provide valid share number\", 400)\n\n # calculate total price for the buy request\n curr_price = lookup(symbol)[\"price\"]\n total_price = curr_price * int(shares)\n\n # db.execute returns list of dicts (one dict, actually), where key == \"cash\" and value - cash left in user's account\n cash_left = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])[0][\"cash\"]\n\n #ensure user has enough money to buy the shares\n if total_price > cash_left:\n return apology(\"not enough cash left\")\n\n # add stock to the users portfolio\n db.execute(\"INSERT INTO portfolio (id, Symbol, Company, Shares, Price, Total) VALUES(:id, :Symbol, :Company, :Shares, :Price, :Total)\",\n id=session[\"user_id\"], Symbol=symbol.upper(), Company=lookup(symbol)[\"name\"],\n Shares=shares, Price=curr_price, Total=total_price)\n\n # update cash\n db.execute('UPDATE users SET cash = :cash WHERE id = :id', cash=cash_left - total_price, id=session[\"user_id\"])\n\n flash(\"Bought!\")\n\n return redirect(\"/\")\n\n else:\n return 
render_template(\"buy.html\")", "def marketBuy(self, currency_pair, amount):\n # calcular o rate num 'for'\n asks = self.rOrderBook(currency_pair=currency_pair, field='asks')\n list_resp = []\n for ask in asks:\n if ask[1] < amount:\n bought = self.limitBuy(currency_pair, rate=ask[0], amount=ask[1], ioc=True)\n list_resp.append(bought)\n amount -= ask[1]\n elif ask[1] >= amount:\n bought = self.limitBuy(currency_pair, rate=ask[0], amount=amount, ioc=True)\n list_resp.append(bought)\n amount -= amount\n break\n return list_resp", "async def buy(self, ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Buy(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to buy\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to buy\", 400)\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n cost = num_shares * company_quote[\"price\"]\n if balance < cost:\n return apology(\"Insufficient cash\", 400)\n else:\n new_balance = balance - cost\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1: #if nothing is returned i.e id and symbol combination does not already exist, insert it\n return_val = db.execute(\"INSERT INTO totalshares (id, symbol, numshares, totalvalue) VALUES (:id, :symbol, :numshares, :totalvalue)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], numshares=num_shares, totalvalue=cost)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n else: #if id, symbol combination exists already, update numshares and totalvalue\n new_numshares = rows[0][\"numshares\"] + num_shares\n new_totalvalue = rows[0][\"totalvalue\"] + cost\n return_val = db.execute(\"UPDATE totalshares SET numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 
403)\n\n else:\n return render_template(\"buy.html\")", "def buy(self, price, volume):\r\n self.order(\"bid\", price, volume)", "def sell():\n if request.method == \"POST\":\n # Ensure data is inputted\n if not request.form.get(\"symbol\"):\n return apology(\"Insert symbol\", 403)\n \n if not request.form.get(\"shares\"):\n return apology(\"Insert number of shares to sell\", 403)\n \n # Ensure shares value is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough shares to sell \n share_count_dict = db.execute(\"SELECT share_count FROM shares WHERE user_id=:usid AND share=:share\", usid=session[\"user_id\"], share=request.form.get(\"symbol\").upper())\n share_count = int(share_count_dict[0][\"share_count\"])\n \n if int(request.form.get(\"shares\")) > share_count:\n return apology(\"You don't own enough shares\", 403)\n \n # Create variables\n symbol = request.form.get(\"symbol\").upper()\n quantity = int(request.form.get(\"shares\"))\n \n # Add cash to user data\n new_cash = float(lookup(symbol)[\"price\"]) * quantity\n db.execute(\"UPDATE users SET cash= cash + :cash WHERE id=:usid\", cash=new_cash, usid=session[\"user_id\"]) \n \n # Remove shares of user data\n db.execute(\"UPDATE shares SET share_count = share_count - :shares WHERE user_id=:usid AND share = :share\", shares=quantity,share=symbol, usid=session[\"user_id\"])\n db.execute(\"DELETE FROM shares WHERE user_id=:usid AND share_count = :shares\", usid=session[\"user_id\"], shares=0)\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol, shares='-' + str(quantity), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(lookup(symbol)[\"price\"]))\n \n return redirect(\"/\")\n \n else:\n # Create list with purchased symbols\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id=:usid\", usid=session[\"user_id\"])\n symbol_list = [None] * len(symbol_dicts)\n \n # Insert symbols into list\n for i in range(len(symbol_dicts)):\n symbol_list[i] = symbol_dicts[i][\"share\"]\n \n return render_template(\"sell.html\", longitude=len(symbol_dicts), symbols=symbol_list)", "def buy():\n if request.method == \"POST\":\n\n if not request.form.get(\"symbol\"):\n return apology(\"Missing symbol\")\n\n elif not request.form.get(\"shares\"):\n return apology(\"Missing shares\")\n # Проверка поля внутри формы, число или нет.\n elif not request.form.get(\"shares\").isdigit():\n return apology(\"Please chose integer\")\n # проверка числа на позитивность.\n elif int(request.form.get(\"shares\")) < 1:\n return apology(\"number of stocks is less zero\", 400)\n\n # проверка цены по символу\n symbol = request.form.get(\"symbol\")\n quote = lookup(symbol)\n # Проверка на валидность символа\n if quote == None :\n return apology(\"The stock does not exist\", 400)\n # Сохраняем цену данного символа в переменную\n price = quote[\"price\"]\n # Вибираем кеш пользователя из базы данных.\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n # цену символа умножаем на число пользователя, если оно больше чем бюджет,вернуть apology\n if float(price) * int(request.form.get(\"shares\")) > float(cash[0][\"cash\"]):\n return apology(\"You don't have enough cash\", 400)\n #\n else:\n # обновляем кеш\n rows3 = 
db.execute(\"UPDATE users SET cash =:update_cash WHERE id=:id\", update_cash = float(cash[0][\"cash\"]) - (float(price)*int(request.form.get(\"shares\"))), id=session[\"user_id\"])\n # Вибираем в портфеле все символы, для проверки на наличие shares (кол-во) акций\n rows2 = db.execute(\"SELECT * FROM portfolio WHERE id=:id AND symbol=:symbol\",id=session[\"user_id\"], symbol=symbol )\n # Если нету shares в определенном символе,тогда добавить.\n if len(rows2) == 0:\n db.execute(\"INSERT INTO partfolio ( id, symbol, shares) VALUES (:id, :symbol, :shares)\",id=session[\"user_id\"] )\n else:\n #Если есть уже кол-во акций,тогда обновить старое кол-во на новое кол-во.\n db.execute(\"UPDATE partfolio SET shares= shares + :shares\",shares = shares)\n\n\n else:\n\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n\n sym = request.form.get(\"symbol\").upper()\n shares = request.form.get(\"shares\")\n row = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n\n if not sym:\n return apology(\"must provide stock's symbol\")\n\n if not shares:\n return apology(\"must provide desired shares\")\n\n query = lookup(sym)\n if not query:\n return apology(\"lookup failed, try again later\")\n\n price = query['price']\n name = query['name']\n cash = row[0]['cash']\n user = row[0]['username']\n\n if cash < price * int(shares):\n return apology(\"Cannot afford \" + shares + \" shares of \" + sym)\n else:\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=sym, name=name, price=price, shares=int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash-price*int(shares), id=session['user_id'])\n return redirect(url_for(\"index\"))\n else:\n return render_template(\"buy.html\")", "def sell():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = row[0]['username']\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n if not result:\n return apology('no symbol available')\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n old_shares = row[0]['shares']\n \n if shares > old_shares:\n return apology('number exceeds available shares')\n \n new_shares = old_shares - shares\n \n if new_shares == 0:\n db.execute(\"DELETE FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n \n quote = lookup(symbol)\n price = quote['price']\n total_p = price * shares\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n old_cash = row[0]['cash']\n \n new_cash = old_cash + total_p\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=new_cash, id=session['user_id'])\n \n #current_time = time.strftime(time.localtime(\"%H:%M:%S %m/%d/%Y\"))\n current_time = time.asctime( 
time.localtime(time.time()) )\n db.execute(\"INSERT INTO history (username, time, symbol, shares) VALUES (:username, :time, :symbol, :shares)\", username=username,time=current_time,symbol=symbol,shares=0-shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"sell.html\")", "def buy():\n if request.method == \"POST\":\n quote = lookup(request.form.get(\"symbol\"))\n if quote is None:\n return apology(\"Please enter a valid symbol\")\n shares = int(request.form.get(\"Shares\"))\n if shares < 0:\n return apology(\"Please enter a positive value\")\n shares = request.form.get(\"Shares\")\n symbol = request.form.get(\"symbol\")\n rows = db.execute(\"SELECT * FROM cash\")\n cash = db.execute(\"SELECT * FROM cash WHERE id=:id\", id=session[\"user_id\"])\n\n if request.form.get(\"id\") not in rows:\n db.execute(\"INSERT INTO cash (id, symbol, name, shares, cash) VALUES(:id, :symbol, :name, :shares, :cash)\", id=session[\"user_id\"], symbol=symbol, name = quote[\"name\"], shares=shares, cash=10000)\n\n else:\n for row in cash:\n cash = db.execute(\"SELECT * FROM cash WHERE id=:id\", id=session[\"user_id\"])\n if row[\"Symbol\"] == symbol:\n db.execute(\"UPDATE cash SET shares=:shares WHERE Symbol=:Symbol\", shares=cash[row][\"shares\"]+int(shares), Symbol=symbol)\n else:\n db.execute(\"INSERT INTO cash (symbol, name, shares) VALUES(:symbol, :name, :shares)\", symbol=symbol, name = quote[\"name\"], shares=shares)\n\n cash = db.execute(\"SELECT * FROM cash WHERE id=:id\", id=session[\"user_id\"])\n\n current_cash = cash[0][\"Cash\"] - (int(shares)*int(quote[\"price\"]))\n\n if current_cash > 0:\n db.execute(\"UPDATE cash SET cash = :cash WHERE symbol=:symbol\", cash=current_cash, symbol=symbol)\n flash(\"Bought!\")\n else:\n return apology(\"Not enough cash\", 403)\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")", "def buy():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n quote = lookup(symbol)\n \n if not quote:\n return apology(\"Invalid Symbol\")\n \n price = usd(quote[\"price\"])\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM users WHERE id= :id\", id=session[\"user_id\"])\n cash = row[0]['cash']\n \n total = shares * quote['price']\n \n if cash - total < 0:\n return apology('cannot afford')\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=(cash-total), id=session['user_id'])\n \n username = row[0]['username']\n \n #current_time = time.strftime(\"%H:%M:%S %m/%d/%Y\")\n current_time = time.asctime( time.localtime(time.time()) )\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n \n if result:\n old_shares = result[0]['shares']\n new_shares = old_shares + shares\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n else:\n db.execute(\"INSERT INTO portfolio (username, symbol, shares) VALUES (:username, :symbol, :shares)\", username=username,symbol=symbol,shares=shares)\n \n db.execute(\"INSERT INTO history (username, time, symbol, shares) 
VALUES (:username, :time, :symbol, :shares)\", username=username,time=current_time,symbol=symbol,shares=shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n stock = lookup(symbol)\n cash = float(db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])[0][\"cash\"])\n if symbol == None or stock == None:\n return apology(\"The stock symbol you searched for doesn't exist.\")\n if not shares.isdigit():\n return apology(\"You have not entered a valid quantity of shares to buy.\")\n shares = int(shares)\n if shares < 1:\n return apology(\"You have entered an incorrect value for stock 'quantity'\")\n if (stock[\"price\"] * shares) > cash:\n return apology(\"You don't have enough cash to buy those stocks\")\n cost = round(shares*stock[\"price\"]*(-1), 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":stock[\"symbol\"],\"va\":stock[\"price\"],\"qu\":shares,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\",{\"cash\":float(cash + cost),\"userid\":session[\"user_id\"]})\n inv = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":stock[\"symbol\"]})\n if not inv:\n db.execute(\"INSERT INTO inventory (symbol,quantity,userid) VALUES(:sy,:qu,:uid)\",\n {\"sy\":stock[\"symbol\"],\"qu\":shares,\"uid\":session[\"user_id\"]})\n else:\n quan = (shares + inv[0][\"quantity\"])\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":quan,\"uid\":session[\"user_id\"],\"sy\":stock[\"symbol\"]})\n flash(\"Purchase completed successfully!\")\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must select a stock\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif float(request.form.get(\"shares\")) <= 0:\n return apology(\"number of shares must be greater than one\", 400)\n elif float(request.form.get(\"shares\")) > db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"]:\n return apology(\"you don't own enough shares\", 400)\n\n numberOfShares = float(request.form.get(\"shares\"))\n\n priceOfEachShare = db.execute(\"SELECT price FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"price\"]\n\n totalValue = numberOfShares * priceOfEachShare\n\n db.execute(\"UPDATE users SET cash = cash + {0} WHERE id=:userId\".format(totalValue), userId=session[\"user_id\"])\n\n db.execute(\"UPDATE portfolio SET number = number - {0} WHERE username=:username AND symbol=:symbol\".format(request.form.get(\"shares\")),\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", 
userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n if db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"] == 0:\n db.execute(\"DELETE FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"), buyorsell=0, number=float(request.form.get(\"shares\")),\n price=priceOfEachShare, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n\n else:\n symbolsList = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n return render_template(\"sell.html\", stocks=symbolsList)", "def buy():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n if available_money < total_price:\n return apology(\"no money bro\", 400)\n\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"B\")\n updating = db.execute(\"UPDATE users SET cash = cash - :upd_price WHERE id = :current_id\",\n upd_price=total_price, current_id=session[\"user_id\"])\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"buy_result.html\",\n shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(available_money_upd))\n else:\n return render_template(\"buy.html\")", "def buyshares():\n # Initialise buy form\n buyform = BuyShareForm()\n # Validate and process form data\n if(buyform.validate_on_submit()):\n # Buys shares\n issuerID = buyform.buysharecode.data\n quantity = buyform.buyquantity.data\n userID = current_user.userID\n # Call buyshare API\n buyshare = gdb.buyshare(userID, issuerID, quantity)\n if(buyshare):\n # Flash with success message\n flash(\"Share purchase successful!\", category=\"success\")\n else:\n # Flash with warning message\n flash(\"Share purchase unsuccessful!\", category=\"error\")\n # Redirect to reffering page or dashboard\n return redirect(request.referrer or url_for('main.dashboard'))", "def 
buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\")\n elif not request.form.get(\"sharesnumber\"):\n return apology(\"must provide no of shares\")\n elif '.' in request.form.get(\"sharesnumber\"):\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not request.form.get(\"sharesnumber\").isdigit():\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not int(request.form.get(\"sharesnumber\")) > 0:\n return apology(\"No of shares is positive value Invalid!!\")\n \n result_dict = lookup(request.form.get(\"symbol\"))\n \n if result_dict == None:\n return apology(\"Symbol does not exist\")\n \n result_cash = db.execute(\"SELECT * from users where id = :id\",id=session[\"user_id\"])\n net_cash = result_cash[0][\"cash\"]\n net_required = int(request.form.get(\"sharesnumber\")) * result_dict['price']\n if net_required > net_cash:\n return apology(\"Oops Don't Have enough Cash!!\")\n \n \n #Update Cash\n net_cash = net_cash - net_required\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",cash= net_cash,id=session[\"user_id\"])\n \n #Update History Tables\n \n db.execute(\"INSERT INTO history(user_id,symbol,price,shares) VALUES(:id,:symbol,:price,:shares) \",id=session[\"user_id\"],symbol=result_dict['symbol'],price=result_dict['price'],shares=request.form.get(\"sharesnumber\"))\n \n #Check Whether user has shares for same symbol\n rows = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol=:symbol\",id=session[\"user_id\"],symbol=result_dict['symbol'])\n #Update NetShares Table\n if len(rows) == 0:\n db.execute(\"INSERT INTO netshares(user_id,symbol,shares) VALUES(:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=result_dict['symbol'],shares=request.form.get(\"sharesnumber\"))\n else:\n db.execute(\"UPDATE netshares SET shares=:shares WHERE user_id = :id AND symbol=:symbol\",shares= int(request.form.get(\"sharesnumber\"))+int(rows[0]['shares']),id=session[\"user_id\"],symbol=result_dict['symbol'])\n return redirect(url_for(\"index\"))\n \n else:\n return render_template(\"buy.html\")\n \n \n #return apology(\"TODO\")", "def sell():\n if request.method == \"GET\":\n return render_template('sell.html')\n \n if request.method == \"POST\":\n symbol = request.form['symbol']\n shares = request.form['shares']\n stock = lookup(symbol)\n \n if not stock:\n return apology('Invalid symbol')\n \n user_shares = db.execute(\"SELECT shares FROM profile \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n if not user_shares or int(user_shares[0][\"shares\"]) < int(shares):\n return apology(\"Not enough shares\")\n db.execute(\"INSERT INTO history (company, shares, value, id, date) \\\n VALUES(:symbol, :shares, :price, :id, :date)\", \\\n symbol=stock[\"symbol\"], shares=-int(shares), \\\n price=stock[\"price\"], id=session[\"user_id\"], date = str(date.today())) \n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n \n shares_total = user_shares[0][\"shares\"] - int(shares)\n if shares_total == 0:\n db.execute(\"DELETE FROM profile \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n else:\n db.execute(\"UPDATE profile SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=shares_total, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n return 
redirect(url_for(\"index\"))", "def sell():\n userid = session[\"user_id\"]\n if request.method == \"GET\":\n symbol = db.execute(\"SELECT symbol FROM purchase WHERE id=:uid\",uid=userid)\n # print(symbol)\n symbols = []\n for s in symbol:\n temp = s[\"symbol\"]\n symbols.append(temp)\n # print(symbols)\n return render_template(\"sell.html\", symbols=symbols)\n else:\n symbol_entry = request.form.get(\"symbol\")\n shares_entry = int(request.form.get(\"shares\"))\n if not symbol_entry or not shares_entry:\n return apology(\"Please select both symbol and shares\")\n\n data = db.execute(\"SELECT symbol, shares FROM purchase WHERE id=:uid\",uid=userid)\n share_check = 0\n\n for s in data:\n if(s[\"symbol\"] == symbol_entry):\n share_check = s[\"shares\"]\n # print(share_check)\n if shares_entry > share_check:\n return apology(\"You don't have this many shares of this company\")\n\n current_cash = (db.execute(\"SELECT cash FROM users WHERE id=:uid\", uid=userid))[0].get(\"cash\")\n query = lookup(symbol_entry)\n share_price = query[\"price\"]\n sold_price = share_price * shares_entry\n\n db.execute(\"UPDATE users SET cash=:sold WHERE id=:uid\",sold=sold_price+current_cash, uid=userid)\n if shares_entry == share_check:\n db.execute(\"DELETE FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol_entry, uid=userid)\n else:\n db.execute(\"UPDATE purchase SET shares=:shares WHERE symbol=:symbol AND id=:uid\",shares=share_check-shares_entry,symbol=symbol_entry, uid=userid)\n\n nshare = -shares_entry\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol_entry,shares=nshare,price=share_price, time=dt)\n return render_template(\"sell.html\", message=\"Sold!\")\n print(data)", "def buy():\n\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\")\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be a positive integer\", 400)\n\n if not request.form.get(\"shares\"):\n return apology(\"missing shares\")\n\n try:\n shares = int(request.form.get(\"shares\"))\n except:\n return apology(\"shares must be a positive integer\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n if not quote:\n return apology(\"Invalid symbol\")\n\n row = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n cash = int(row[0][\"cash\"])\n\n amount = quote[\"price\"] * float(request.form.get(\"shares\"))\n\n if cash < amount:\n return apology(\"not enough cash\")\n\n # add transaction to history\n db.execute(\"INSERT INTO histories (symbol, shares, price, id) VALUES(:symbol, :shares, :price, :id)\",\n symbol=quote[\"symbol\"], shares=request.form.get(\"shares\"), price=usd(quote[\"price\"]), id=session[\"user_id\"])\n\n # update cash remaining in database\n db.execute(\"UPDATE users SET cash = cash - :amount WHERE id=:id\", amount=amount, id=session[\"user_id\"])\n\n # check if user owns a share of symbol already\n user_shares = db.execute(\"SELECT * FROM portfolio WHERE id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=quote[\"symbol\"])\n\n # if symbol is new\n if not user_shares:\n db.execute(\"INSERT INTO 'portfolio' ('Symbol','Shares','id','Name','Price') VALUES (:symbol, :shares, :id, :name, :price) \",\n symbol=quote[\"symbol\"], shares=request.form.get(\"shares\"), id=session[\"user_id\"], 
name=quote[\"name\"], price=quote[\"price\"])\n flash(\"Bought\")\n return redirect(\"/\")\n\n # if user already owns some share of the symbol\n else:\n total_shares = user_shares[0][\"shares\"] + int(request.form.get(\"shares\"))\n db.execute(\"UPDATE portfolio SET shares=:total_shares WHERE id=:id AND symbol=:symbol\",\n total_shares=total_shares, id=session[\"user_id\"], symbol=quote[\"symbol\"])\n flash(\"Bought\")\n return redirect(\"/\")\n\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n\n # get share symbol from form\n symb = request.form.get(\"symbol\")\n # check if there is text and that it is a symbol\n if symb is None:\n return apology(\"Invalid symbol\", 400)\n else:\n # retrieve stock price, symbol and stock name via lookup function\n quote = lookup(request.form.get(\"symbol\"))\n\n # retrieve number of shares wanted as an int\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n # return apology if not int\n return apology(\"Invalid amount\", 400)\n\n # if stock does not exist or is blank or if there is no quantity then apologise\n if quote is not None and shares > 0 :\n # get current user's cash. query session dict for current user logged in\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = cash[0][\"cash\"]\n\n # check that user has enough cash to purchase shares\n if cash > shares * quote[\"price\"]:\n # insert transaction into portfolio table if user has enough cash\n db.execute(\"INSERT INTO portfolio (name, userid, price, quantity) VALUES (:name, :userid, :price, :quantity)\",name=quote[\"symbol\"],userid=session[\"user_id\"], price=quote[\"price\"], quantity=shares)\n # update user's cash in the users table\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash-shares*quote[\"price\"], id=session[\"user_id\"])\n # return user to index summary page after purchase\n return redirect(\"/\")\n else:\n flash(\"Not enough cash!\")\n return redirect(\"/buy\")\n else:\n return apology(\"Stock does not exist or quantity not given\", 400)\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")\n\n return apology(\"Buy failed\", 400)", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n tick = request.form.get(\"ticker\")\n quote = lookup(tick)\n if not quote:\n return apology(\"Ticker does not exist\")\n shares = int(request.form.get(\"shares\"))\n if shares <= 0:\n return apology(\"Please input a valid number of shares\")\n money = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n if float(money[0][\"cash\"]) < quote[\"price\"] * shares:\n return apology(\"Not enough money\")\n db.execute(\"UPDATE users SET cash = cash - :purchase WHERE id = :id\", id=session[\"user_id\"], purchase=(quote[\"price\"] * float(shares)))\n findshares = db.execute(\"SELECT shares FROM purchases WHERE user_id = :id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"])\n\n if not findshares:\n db.execute(\"INSERT INTO purchases (username, shares, price, total, ticker, user_id) VALUES(:username, :shares, :price, :total, :ticker, :id)\", username=quote[\"name\"], shares=shares, price=usd(quote[\"price\"]), total=usd(shares * quote[\"price\"]), ticker=quote[\"symbol\"], id=session[\"user_id\"])\n else:\n db.execute(\"UPDATE purchases SET shares=:number, total=:total WHERE user_id=:id AND ticker=:ticker\", id=session[\"user_id\"], 
ticker=quote[\"symbol\"], total=(float(quote[\"price\"])*float(shares)), number=int(findshares[0][\"shares\"]) + int(shares))\n return redirect(url_for(\"index\"))", "def buy(self,\n currency_pair,\n rate,\n amount):\n pass", "async def sell(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot sell less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name not in user_stocks:\n\t\t\t\tawait ctx.send(f'You do not have any shares of {name}.')\n\t\t\t\treturn\n\t\t\tif shares > user_stocks[name]['count']:\n\t\t\t\tawait ctx.send(\n\t\t\t\t\tf'You do not have enough shares of {name}. '\n\t\t\t\t\tf'You only have {user_stocks[name]} share{plural}.'\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\tuser_stocks[name]['count'] -= shares\n\t\t\tif user_stocks[name]['count'] == 0:\n\t\t\t\tdel user_stocks[name]\n\t\tbal = await bank.deposit_credits(ctx.author, shares * price)\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(\n\t\t\tf'You sold {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def sell():\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to sell\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to sell\", 400)\n\n rows = db.execute(\"SELECT id, symbol, numshares FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1:\n return apology(\"You do not have shares of \" + symbol, 400)\n if num_shares > rows[0][\"numshares\"]:\n return apology(\"You cannot sell more shares than you have\", 400)\n\n sale_value = num_shares * company_quote[\"price\"]\n\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n new_balance = balance + sale_value\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares*-1, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n new_numshares = rows[0][\"numshares\"] - num_shares\n new_totalvalue = rows[0][\"totalvalue\"] - sale_value\n return_val = db.execute(\"UPDATE totalshares SET 
numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 403)\n else:\n rows = db.execute(\"SELECT symbol, numshares FROM totalshares WHERE id = :id\", id=session[\"user_id\"])\n symbol_options = []\n if rows != None and len(rows) > 0:\n for row in rows:\n if row[\"numshares\"] > 0:\n symbol_options.append(row[\"symbol\"])\n return render_template(\"sell.html\", symbol_options=symbol_options)", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n stock = lookup(request.form.get(\"symbol\"))\n\n if stock == None:\n return apology(\"Symbol not found. Please re-check the symbol and try again!\")\n\n shares = int(request.form.get(\"shares\"))\n if not shares or int(shares) <= 0:\n return apology(\"Invalid shares. Please re-check and try again!\")\n\n company_name = stock[\"name\"]\n price = float(stock[\"price\"])\n symbol = stock[\"symbol\"]\n userid = session[\"user_id\"]\n available_cash = (db.execute(\"SELECT cash FROM users WHERE id=:id\", id = userid))[0].get(\"cash\")\n total = shares*price\n if total > available_cash:\n return apology(\"Sorry! You do not have sufficient balance\")\n else:\n check = (db.execute(\"SELECT symbol FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol, uid=userid))\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol,shares=shares,price=price, time=dt)\n db.execute(\"UPDATE users SET cash=:cash WHERE id=:uid\", cash=available_cash-shares*price, uid=userid)\n\n # check = (db.execute(\"SELECT symbol FROM history WHERE symbol=:symbol\", symbol=symbol))[0].get(\"symbol\")\n print(len(check))\n if len(check) == 0:\n db.execute(\"INSERT INTO purchase (id, symbol, name, shares) VALUES (:userid, :symbol, :name, :shares)\", userid=userid, symbol=symbol, name=company_name, shares=shares)\n else:\n exshares = int((db.execute(\"SELECT shares FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol,uid=userid))[0].get(\"shares\"))\n # print(exshares+\" \"+type(exshares))\n extotal = float((db.execute(\"SELECT total FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol,uid=userid))[0].get(\"total\"))\n db.execute(\"UPDATE purchase SET shares=:newshares WHERE symbol=:symbol AND id=:uid\", newshares=shares+exshares, symbol=symbol, uid=userid)\n return render_template(\"bought.html\", company_name=company_name, shares=shares, symbol=symbol, usd=usd(shares*price), balance=usd(available_cash-shares*price))", "def sell():\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n # ensure proper symbol\n stock = lookup(request.form.get(\"symbol\"))\n if not stock:\n return apology(\"Invalid Symbol\")\n\n # ensure proper number of shares\n try:\n shares = int(request.form.get(\"shares\"))\n if shares < 0:\n return apology(\"Amount of shares must be greater than 0\")\n except:\n return apology(\"Amount of shares must be 
greater than 0\")\n\n # select the symbol shares of that user\n user_shares = db.execute(\"SELECT shares FROM portfolio \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n\n # check if enough shares to sell\n if not user_shares or int(user_shares[0][\"shares\"]) < shares:\n return apology(\"You don't hold enough shares\")\n\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M\")\n\n # update history of a sell\n db.execute(\"INSERT INTO history (symbol, shares, price, id, method, times, totaltarns) \\\n VALUES(:symbol, :shares, :price, :id, :method, :times, :totaltrans)\", \\\n symbol=stock[\"symbol\"], shares=-shares, \\\n price=usd(stock[\"price\"]), id=session[\"user_id\"], method= \"sell\", times= date_time, totaltrans = shares * stock[\"price\"])\n\n # update user cash (increase)\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n\n # decrement the shares count\n amountshares = user_shares[0][\"shares\"] - shares\n\n # if after decrement is zero, delete shares from portfolio\n if amountshares == 0:\n db.execute(\"DELETE FROM portfolio \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n # otherwise, update portfolio shares count\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=amountshares, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n\n # return to index\n return redirect(url_for(\"index\"))", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide ticker\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif not request.form.get(\"shares\").isdigit():\n return apology(\"must enter numbers\", 400)\n elif float(request.form.get(\"shares\")) <= 0 or (float(request.form.get(\"shares\")) % 1 != 0):\n return apology(\"number must be integer greater than one\", 400)\n elif not lookup(request.form.get(\"symbol\")):\n return apology(\"couldn't find company\", 400)\n\n currentSymbols = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n\n for symbol in currentSymbols:\n if symbol[\"symbol\"].lower() == request.form.get(\"symbol\").lower():\n return apology(\"you've already bought that stock\", 403)\n\n currentBalance = db.execute(\"SELECT cash from users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n buyingPrice = lookup(request.form.get(\"symbol\"))[\"price\"] * float(request.form.get(\"shares\"))\n if currentBalance < buyingPrice:\n return apology(\"not enough cash\", 403)\n else:\n db.execute(\"UPDATE users SET cash = cash - {0} WHERE id=:userId\".format(buyingPrice), userId=session[\"user_id\"])\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n symbol = lookup(request.form.get(\"symbol\"))[\"symbol\"]\n numberOfShares = float(request.form.get(\"shares\"))\n price = lookup(request.form.get(\"symbol\"))[\"price\"]\n date = datetime.datetime.utcnow()\n db.execute(\"INSERT INTO portfolio (username, symbol, number, price, date) VALUES(:username, :symbol, :number, :price, :date)\",\n username=username, symbol=symbol, number=numberOfShares, price=price, date=date)\n\n 
db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=symbol, buyorsell=1, number=float(request.form.get(\"shares\")),\n price=price, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n \n # initiate variables\n shares = int(request.form.get(\"shares\"))\n stocks = []\n \n # obtain user's stock information from portfolio database\n stocks = db.execute(\"SELECT shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n # check that user actually owns enough stock, or any stock at all\n if stocks == []:\n return apology(\"you don't own any of this stock\")\n if shares > stocks[0][\"shares\"]:\n return apology(\"invalid number of shares\")\n \n # calculate price per share and cost of all shares\n price = round(float(quote[\"price\"]),2)\n cost = round(float(shares * price),2)\n \n # update user's cash balance\n db.execute(\"UPDATE users SET cash = cash + :cost WHERE id = :id\", cost = cost, id=session[\"user_id\"])\n \n # if there are still shares leftover after sale, update row\n if shares < stocks[0][\"shares\"]:\n db.execute(\"UPDATE portfolio SET shares = shares - :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], shares = shares, symbol = quote[\"symbol\"])\n \n # otherwise, if not shares leftover, remove row from portfolio entirely\n elif shares == stocks[0][\"shares\"]:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=-shares,price=price)\n \n flash('Sold!')\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET\n else:\n return render_template(\"sell.html\")", "def sell():\n username = session.get(\"username\")\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n req_quantity = request.form.get(\"shares\")\n if not req_quantity.isdigit() or int(req_quantity)<=0:\n return apology(\"Quantity must be positive integer\", 400)\n req_quantity = int(req_quantity)\n status = \"sold\"\n\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n owned_stock = db.execute(\"SELECT SUM(quantity) FROM history WHERE username=:username GROUP BY stock_symbol HAVING stock_symbol=:symbol\",\n username=username, symbol=symbol)\n if owned_stock:\n owned_quantity = owned_stock[0][\"SUM(quantity)\"]\n stock = lookup(symbol)\n price = stock[\"price\"]\n name = stock[\"name\"]\n else:\n owned_quantity = 0\n if owned_quantity>=req_quantity:\n total_value = req_quantity * price\n db.execute(\"INSERT INTO history 
(username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :symbol, :price, :time, :quantity, :name, :status)\",\n username=username, symbol=symbol, price=price, time=time, quantity=-req_quantity, name=name, status=status)\n db.execute(\"UPDATE users SET cash = cash+:total_value WHERE username=:username\",\n total_value=total_value, username=username)\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n message = f\"Recorded sold {req_quantity} share(s) of {name} total {usd(total_value)}, your new cash balance is {usd(cash)}\"\n return render_template(\"sell.html\", message = message)\n else:\n return apology(\"Insufficient shares\", 400)\n # if db.execute()\n else:\n stock_options = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n stock_options = [s[\"stock_symbol\"] for s in stock_options]\n\n # print(f\"Stock options: {stock_options}\")\n return render_template(\"sell.html\", options = stock_options)", "def sell():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n units_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE id = :current_id AND stock = :stock_code\",\n current_id=session[\"user_id\"], stock_code=request.form.get(\"symbol\"))\n available_units = units_list[0][\"SUM(units)\"]\n\n if available_units < int(request.form.get(\"shares\")):\n return apology(\"no units bro\", 400)\n\n new_cash = available_money + total_price\n\n updating = db.execute(\"UPDATE users SET cash = :upd_cash WHERE id = :current_id\",\n upd_cash=new_cash, current_id=session[\"user_id\"])\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"S\")\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"sell_result.html\", shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(new_cash))\n else:\n available_stocks_info = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_stocks_list = []\n for element in available_stocks_info:\n if element[\"stock\"] not in available_stocks_list:\n available_stocks_list.append(element[\"stock\"])\n\n return render_template(\"sell.html\", available_stocks=available_stocks_list)", "def sell():\n\n if request.method == \"POST\":\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n symbol = 
request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n if not symbol:\n return apology(\"please select a valid symbol\")\n\n target_stock = db.execute(\"SELECT *, sum(shares) FROM transactions WHERE user=:user AND symbol=:symbol\",\n user=user, symbol=symbol)\n print(target_stock)\n if not shares:\n return apology(\"must provide how many shares to sell\")\n\n elif shares > target_stock[0]['sum(shares)'] or shares < 1:\n return apology(\"shares must be more than 0 and less than \" + str(target_stock[0]['shares']))\n\n query = lookup(symbol)\n price = query['price']\n name = query['name']\n cash = entry[0]['cash']\n\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=symbol, name=target_stock[0]['name'], price=price, shares=-int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash+price*shares, id=session['user_id'])\n\n return redirect(url_for(\"index\"))\n\n else:\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n\n return render_template(\"sell.html\", stocks=owned)", "def buy():\n if request.method == \"POST\":\n\n stock = lookup(request.form.get(\"symbol\"))\n\n # This took a while for check to confirm. First check that user input is digit.\n if not request.form.get(\"shares\").isdigit():\n return apology(\"Inavalid number of shares\")\n numOfShares = request.form.get(\"shares\")\n\n # If request is POST firstly check if anything has been submitted.\n if not request.form.get(\"symbol\"):\n return apology(\"You haven't typed a symbol\")\n # if stock lookup request is None or if the numOfShares is not a number of 1 or higher return apologies.\n if stock is None:\n return apology(\"This doesn't seem to be a valid symbol, try again\")\n # userID and user serparate in case both are required.\n userID = session[\"user_id\"]\n user = db.execute(\"SELECT * FROM users WHERE id = :id\", id=userID)\n #funds is a float and can be multiplied by number of shares\n funds = float(user[0][\"cash\"])\n purchasePrice = stock[\"price\"] * int(numOfShares)\n\n date_time = datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n\n\n if funds < purchasePrice:\n return apology(\"You don't have sufficient funds to make this purchase\")\n else:\n # Take price off total cash for current user.\n db.execute(\"UPDATE users SET cash = cash - :purchasePrice WHERE id = :userID\", purchasePrice=purchasePrice, userID=userID)\n # Insert into transactions table the id, symbol, number of share bought, price per share, the time,date and the BUY order.\n db.execute(\"INSERT INTO transactions (id, symbol, num_shares, price_ps, date_time, buy_or_sell) VALUES (:id, :symbol, :num_shares, :price_ps, :date_time, :buy_or_sell)\",\n id=userID, symbol=stock[\"symbol\"], num_shares=numOfShares, price_ps=stock[\"price\"], date_time=date_time, buy_or_sell=\"BUY\")\n # stockowned allows search of portfolio table for results that have userID and the bought stock.\n stockOwned = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND id=:userID\", symbol=stock[\"symbol\"], userID=userID)\n # If there are nor results (not stockowned) then insert into portfolio\n if not stockOwned:\n db.execute(\"INSERT INTO portfolio (id, symbol, numOwned, pricePerShare, totalValue) VALUES (:userID, :symbol, :numOwned, 
:pricePerShare, :totalValue)\",\n userID=userID, symbol=stock[\"symbol\"], numOwned=numOfShares, pricePerShare=stock[\"price\"], totalValue=purchasePrice)\n # Other wise update the current results. Had to ensuer numOf Share was floas was sotred as a str. Using indexes of stockowned for values.\n else:\n newNumOwned = stockOwned[0][\"numOwned\"] + float(numOfShares)\n newTotalValue = stockOwned[0][\"totalValue\"] + purchasePrice\n newPPS = \"%.2f\"%(newTotalValue / newNumOwned)\n db.execute(\"UPDATE portfolio SET numOwned = :newNumOwned, totalValue = :newTotalValue, pricePerShare = :newPPS WHERE symbol=:symbol AND id=:userID\",\n newNumOwned=newNumOwned, newTotalValue=newTotalValue, newPPS=newPPS, symbol=stock[\"symbol\"], userID=userID)\n\n return redirect(\"/\")\n\n # If a GET request, return the buy.html template.\n else:\n return render_template(\"buy.html\")", "def buy():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Access form data\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Ensure symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n if not shares:\n return apology(\"must provide shares\", 400)\n\n # Check if submitted shares string is a positive integer\n if not shares.isdigit() :\n return apology(\"shares is not a number\", 400)\n # Shares is valid\n else:\n shares = int(shares)\n\n # Obtain quote using lookup function\n QUOTED = lookup(symbol)\n\n # Ensure valid symbol was submitted\n if QUOTED is None:\n return apology(\"invalid symbol\", 400)\n\n # Check if user has enough cash to buy shares\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n cost = QUOTED[\"price\"] * shares\n if cash < cost:\n return apology(\"can't afford\", 400)\n\n # New amount of cash user has after buying shares\n new_cash_total = cash - cost\n\n # Update cash in users table for user\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", new_cash_total, user_id)\n\n # Insert buy log into history table\n db.execute(\"INSERT INTO history (user_id, symbol, shares, price, transacted) VALUES (?, ?, ?, ?, datetime('now'))\",\n user_id, QUOTED[\"symbol\"], shares, QUOTED[\"price\"])\n\n # Keep track of shares in shares table\n current_shares = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\", user_id, QUOTED[\"symbol\"])\n\n # If shares have not been bought before\n if not current_shares:\n db.execute(\"INSERT INTO shares VALUES (?, ?, ?, ?, ?, ?)\",\n user_id, QUOTED[\"symbol\"], QUOTED[\"name\"], shares, QUOTED[\"price\"], QUOTED[\"price\"])\n\n # If shares have been bought before\n else:\n new_shares_total = current_shares[0][\"shares_count\"] + shares\n shares_value_total = new_shares_total * QUOTED[\"price\"]\n db.execute(\"UPDATE shares SET shares_count = ?, price = ?, total = ? WHERE user_id = ? 
AND symbol = ?\",\n new_shares_total, QUOTED[\"price\"], shares_value_total, user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Bought!\", \"info\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n # Ensure there is proper symbol\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n else:\n quote = lookup(request.form.get(\"symbol\"))\n\n if not quote:\n return apology(\"Please enter a valid stock symbol\")\n\n # Ensure proper number of shares\n try:\n share = int(request.form.get(\"shares\"))\n if share < 0:\n return apology(\"Shares must be positive\")\n except:\n return apology(\"Shares msut be positive integer\")\n # Total Amount the user have to pay\n total_amount = quote[\"price\"] * share\n\n # Taking user's cash in account\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\",id=session[\"user_id\"])\n if float(cash[0][\"cash\"]) >= total_amount:\n # Update history table\n # Update do here bro\n # Update cash of user\n db.execute(\"UPDATE users SET cash = cash - :purchase WHERE id = :id\",id=session[\"user_id\"], purchase=(quote[\"price\"] * float(share)))\n\n # Select the users share of that symbol\n user_share = db.execute(\"SELECT shares FROM portfolio WHERE id=:id\",id=session[\"user_id\"])\n\n # If there is no stock in user's portfolio\n if not user_share:\n db.execute(\"INSERT INTO portfolio(id, name, shares, price, total, symbol) VALUES(:id, :name, :shares, :price, :total, :symbol)\",id=session[\"user_id\"]\n , name=quote[\"name\"], shares=share, price = usd(quote[\"price\"]), total = usd(total_amount), symbol = quote[\"symbol\"])\n #else increment share count\n else:\n total_shares = user_share[0][\"shares\"] + share\n db.execute(\"UPDATE portfolio SET shares = :shares WHERE id = :id AND symbol = :symbol\", shares = total_shares, id = session[\"user_id\"], symbol=quote[\"symbol\"])\n return redirect(\"/\")\n else:\n return apology(\"You Dont have enough cash \", 406)\n # User reach via another route(get)", "def buy():\n\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = int(request.form.get(\"shares\"))\n quote = lookup(symbol)\n userid = session[\"user_id\"]\n\n if quote is None:\n return apology(\"Incorrect symbol, try again\", 400)\n else:\n rows = db.execute(\"SELECT cash FROM users WHERE id = :userid\",\n userid=userid)\n cash = rows[0][\"cash\"]\n price = quote[\"price\"]\n tot = price * shares\n\n if cash < tot:\n return apology(\"you can't afford this stock\")\n else:\n db.execute(\"UPDATE users SET cash = cash - :tot WHERE id = :userid\", tot=tot, userid=userid)\n db.execute(\"\"\"INSERT INTO purchase (userid, symbol, shares, tot)\n VALUES (:userid, :symbol, :shares, :tot)\"\"\", userid=userid,\n symbol=symbol, shares=shares, tot=tot)\n flash(\"Bought!\")\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n # Ensures symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"Must provide symbol\", 400)\n # Ensures shares was submitted\n if not request.form.get(\"shares\"):\n return apology(\"Must provide amount of shares\", 400)\n # Ensures what is inputed for shares is numeric\n if not request.form.get(\"shares\").isdigit():\n return apology(\"Must provide a valid amount of shares\", 400)\n\n # Sets quote to the information about symbol inputed by user\n quote = 
lookup(request.form.get(\"symbol\"))\n # Ensures symbol is a valid symbol that has a quote\n if not quote:\n return apology(\"Symbol invalid\", 400)\n # Cost of stock\n cost = quote[\"price\"]\n # Symbol of stock\n symbol = quote[\"symbol\"]\n # Name of stock\n name = quote[\"name\"]\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # Total amount of money needed to buy the amount and type of stock user has inputed\n total = float(request.form.get(\"shares\")) * cost\n # If user is able to afford the stock(s), update the cash colomn and add info to portfolio table\n if money >= total:\n # Remaining is the amount of cash a user has left after buying the stock\n remaining = money - total\n # Inserts amount remaining into the cash field\n db.execute(\"UPDATE users SET cash = ':remaining' WHERE id=:userid\", remaining=remaining, userid=session[\"user_id\"])\n # Logs stock transaction in portfolio\n db.execute(\"INSERT INTO portfolio (userid, symbol, price, shares, TOTAL, transacted, name) VALUES(:userid, :symbol, :price, :shares, :TOTAL, :transacted, :name)\",\n userid=session[\"user_id\"], symbol=symbol, price=cost, shares=request.form.get(\"shares\"), TOTAL=total, transacted=datetime.datetime.now(), name=name)\n\n # If user cannot afford stock(s), return apology\n else:\n return apology(\"You do not have enough money\", 400)\n\n # Return back to index page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "def buy():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n shares = int(request.form.get(\"shares\"))\n price = round(float(quote[\"price\"]),2)\n if shares < 1:\n return apology(\"must provide a positive integer of shares\")\n \n # compare user's cash amount to calculated cost of shares\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n cost = round(float(shares * price),2)\n \n # return error if not enough cash\n if cost > cash[0][\"cash\"]:\n return apology (\"insufficient funds\")\n \n # if sufficient cash present, update users, portfolio and history tables with new info\n else:\n db.execute(\"UPDATE users SET cash = cash - :cost WHERE id = :id\", cost=cost, id = session[\"user_id\"])\n db.execute(\"UPDATE portfolio SET shares = shares + :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"],symbol=quote[\"symbol\"],shares=shares)\n db.execute(\"INSERT OR IGNORE INTO portfolio (id,symbol,shares) VALUES (:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=quote[\"symbol\"],shares=shares)\n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=shares,price=price)\n \n flash('Bought!')\n return redirect(url_for(\"index\"))\n \n # else if user reached 
route via GET\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\")\n elif not request.form.get(\"sharesnumber\"):\n return apology(\"must provide no of shares to sell\")\n elif '.' in request.form.get(\"sharesnumber\"):\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not request.form.get(\"sharesnumber\").isdigit():\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not int(request.form.get(\"sharesnumber\")) > 0:\n return apology(\"No of shares is positive value Invalid!!\")\n \n result_dict = lookup(request.form.get(\"symbol\"))\n \n if result_dict == None:\n return apology(\"Symbol does not exist\")\n \n \n #Check No of Shares\n no_of_shares = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol = :symbol\",id=session[\"user_id\"],symbol =request.form.get(\"symbol\"))\n no_of_shares = int(no_of_shares[0]['shares'])\n if int(request.form.get(\"sharesnumber\")) > no_of_shares:\n return apology(\"Sorry!! Don't Have Enough shares\")\n \n result_cash = db.execute(\"SELECT * from users where id = :id\",id=session[\"user_id\"])\n net_cash = result_cash[0][\"cash\"]\n net_worth = int(request.form.get(\"sharesnumber\")) * result_dict['price']\n \n \n \n #Update Cash\n net_cash = net_cash + net_worth\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",cash= net_cash,id=session[\"user_id\"])\n \n #Update History Tables\n \n db.execute(\"INSERT INTO history(user_id,symbol,price,shares) VALUES(:id,:symbol,:price,:shares) \",id=session[\"user_id\"],symbol=result_dict['symbol'],price=result_dict['price'],shares=(-1)*int(request.form.get(\"sharesnumber\")))\n \n #Check Whether user has shares for same symbol\n rows = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol=:symbol\",id=session[\"user_id\"],symbol=result_dict['symbol'])\n #Update NetShares Table\n if len(rows) == 0:\n db.execute(\"INSERT INTO netshares(user_id,symbol,shares) VALUES(:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=result_dict['symbol'],shares=request.form.get(\"sharesnumber\"))\n else:\n db.execute(\"UPDATE netshares SET shares=:shares WHERE user_id = :id AND symbol=:symbol\",shares= -int(request.form.get(\"sharesnumber\"))+int(rows[0]['shares']),id=session[\"user_id\"],symbol=result_dict['symbol'])\n return redirect(url_for(\"index\"))\n \n else:\n return render_template(\"sell.html\")\n #return apology(\"TODO\")", "def sell():\n if request.method == \"POST\":\n dict=lookup(request.form.get(\"symbol\"))\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\") or not lookup(request.form.get(\"symbol\")):\n return apology(\"Must provide valid symbol and positive integer\",400)\n else:\n row=db.execute(\"SELECT *FROM portofolio WHERE symbol=:s AND user_id=:u_i\",s=request.form.get(\"symbol\"),u_i=session[\"user_id\"])\n if len(row) == 0 or int(request.form.get(\"shares\")) > row[0][\"shares\"]:\n return apology(\"you don't have enough shares of this company\",400)\n else:\n db.execute(\"INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)\",s=dict[\"symbol\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=time.asctime( time.localtime(time.time())),u_i=session[\"user_id\"],status='sold')\n db.execute(\"UPDATE portofolio SET shares =shares-:sh, price=:p, total=total-:t WHERE symbol=:s AND 
user_id=:u_i\",sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=dict[\"price\"] * int(request.form.get(\"shares\")),s=dict[\"symbol\"],u_i=session[\"user_id\"])\n db.execute(\"UPDATE users SET cash=cash+:extra WHERE id=:i\",extra=int(request.form.get(\"shares\")) * dict[\"price\"],i=session[\"user_id\"])\n db.execute(\"DELETE FROM portofolio WHERE shares=0\")\n return redirect(\"/\")\n else:\n rows=db.execute(\"SELECT *FROM portofolio where user_id=:u_i \",u_i=session[\"user_id\"])\n arr=[]\n for row in rows:\n arr.append(row['symbol'])\n return render_template(\"selling.html\",arr=arr)", "def sell():\n userid = session[\"user_id\"]\n stocks = db.execute(\"SELECT symbol FROM purchase WHERE userid = :userid GROUP BY symbol\",userid=userid)\n\n if request.method == \"POST\":\n symbol_sell = request.form.get(\"symbol\")\n shares_sell = float(request.form.get(\"shares\"))\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol HAVING symbol = :symbol\", userid=userid, symbol=symbol_sell)\n if shares_info[0][\"shares_sum\"] < shares_sell:\n return apology(\"You don't have that many shares\", 400)\n else:\n current = lookup(symbol_sell)\n price = current[\"price\"]\n amount = -shares_sell * price\n cash = db.execute(\"SELECT cash FROM users WHERE id =:userid\", userid=userid)\n balance = cash[0][\"cash\"] - amount\n db.execute(\"INSERT INTO purchase (userid, symbol, shares, tot) VALUES(:userid, :symbol, :shares, :tot)\",\n userid=userid, symbol=symbol_sell, shares=-shares_sell, tot=amount)\n db.execute(\"UPDATE users SET cash = :balance WHERE id = :userid\", balance=balance, userid=userid)\n flash(\"SOLD!!\")\n return redirect(\"/\")\n else:\n list_symbol = list()\n for symbol in stocks:\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol HAVING symbol = :symbol\", userid = userid, symbol=symbol[\"symbol\"])\n current_shares = shares_info[0]\n if shares_info[0][\"shares_sum\"]:\n list_symbol.append(symbol[\"symbol\"])\n return render_template(\"sell.html\", list_symbol=list_symbol)", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n # run lookup function\n dict_3 = lookup(symbol)\n if not dict_3:\n return apology(\"invalid symbol\")\n else:\n symbol = dict_3[\"symbol\"].upper()\n name = dict_3[\"name\"]\n price = dict_3[\"price\"]\n row_stock = db.execute(\"SELECT id FROM stocks WHERE symbol==:symbol\", symbol=symbol)\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"] - (price*shares)\n if new_cash < 0:\n return apology(\"Not enough cash\")\n # Add new stock symbol to table stocks\n if not row_stock:\n db.execute(\"INSERT INTO stocks(symbol, name) VALUES(:symbol, :name)\", symbol=symbol, name=name )\n # Get id from new inserted stock\n row_stock = db.execute(\"SELECT id FROM stocks WHERE symbol==:symbol\", symbol=symbol)\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"], price=price, shares=shares, buy=1)\n # INSERT information in 'portfolio' table\n row_portfolio = db.execute(\"SELECT stock_id FROM portfolio WHERE user_id==:user_id and stock_id=:stock_id\", 
user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"])\n if not row_portfolio:\n db.execute(\"INSERT INTO portfolio(user_id, stock_id, shares) VALUES(:user_id, :stock_id, :shares)\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"], shares=shares)\n else:\n # UPDATE shares in 'portfolio' table\n shares_db = db.execute(\"SELECT shares FROM portfolio WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"])\n if shares_db:\n new_shares = shares_db[0][\"shares\"]+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=row_stock[0][\"id\"], shares=new_shares)\n # Update cash in 'users' table\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user buys stock\n flash('Bought!')\n return redirect(\"/\")", "async def buy(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n price = self.iex.price(symbol)\r\n cost = quantity * price\r\n if company.balance < cost:\r\n await ctx.send(f\"{company.name}\\nBalance: {company.balance} USD\\nPurchase cost: {cost} USD\")\r\n raise StonksError()\r\n\r\n value = price * quantity\r\n self.iex.buy(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``-{value} {company.name} ⯮ {quantity} {symbol} @ {price}``\")", "def buy():\n\n # if user reached route via GET return them an input form\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n # if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n\n # get id as it is used many times\n id = session[\"user_id\"]\n\n # get symbol as it is used many times\n symbol = request.form.get(\"symbol\")\n\n # get share volume requested\n volume = int(request.form.get(\"volume\"))\n\n # ensure stock symbol was submitted\n if not symbol:\n return apology(\"you must provide a stock symbol\")\n\n # ensure positive volume (integer rule handled elsewhere)\n elif volume <= 0:\n return apology(\"volume must be integer greater than 0\")\n\n # lookup stock on yahoo\n stock_info = lookup(symbol)\n\n # if error looking stock up\n if not stock_info:\n return apology(\"that stock symbol doesn't exist\")\n\n # query database for cash balance\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=id)\n cash = cash[0]['cash']\n\n # cost of requested shares\n purchase_cost = volume * stock_info['price']\n\n # if sufficient cash, make purchase, else return apology\n if purchase_cost <= cash:\n\n # check if user already owns any stock in this company\n existing = db.execute(\"SELECT num_shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=id, symbol=symbol)\n\n # if no existing shares, add them\n if not existing:\n new = db.execute(\"INSERT INTO portfolio (id, symbol, num_shares) VALUES(:id, :symbol, :num_shares)\", id=id, symbol=symbol, num_shares=volume)\n\n # if there are existing shares, add new volume to them\n else:\n add = db.execute(\"UPDATE portfolio SET num_shares = :num_shares WHERE id = :id AND symbol = :symbol\", num_shares=existing[0]['num_shares'] + volume, id=id, symbol=symbol)\n\n # set date string\n dstring = 
time(str(datetime.datetime.utcnow()))\n\n # update transaction history\n result2 = db.execute(\"INSERT INTO `transaction` (id, symbol, volume, share_price, dtstamp) VALUES(:id, :symbol, :volume, :share_price, :dtstamp)\", id=id, symbol=symbol, volume=volume, share_price=stock_info['price'], dtstamp=dstring)\n\n # reduce cash balance\n result = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash-purchase_cost, id=id)\n\n # redirect user to home page\n return redirect(url_for(\"index\"))\n else:\n return apology(\"insufficient funds\")", "def sell():\n if request.method == \"POST\":\n bef = db.execute(\"SELECT symbol FROM ind WHERE user_id = ?\", session[\"user_id\"])\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which valid stock to sell\", 403)\n symbol = request.form.get(\"symbol\")\n p = db.execute(\"SELECT COUNT(symbol) FROM ind WHERE user_id = ?\", session[\"user_id\"])\n q = 0\n\n for i in range(int(p[0][\"COUNT(symbol)\"])):\n if symbol == bef[i][\"symbol\"]:\n q = 1\n if q == 0:\n return apology(\"Please specify which valid stock to sell\", 403)\n if not request.form.get(\"shares\"):\n return apology(\"Please specify how many stocks you want to sell\", 403)\n if int(request.form.get(\"shares\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"shares\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) < int(request.form.get(\"shares\")):\n return apology(\"You do not own that many shares\", 403)\n shares = int(request.form.get(\"shares\"))\n db.execute(\"CREATE TABLE IF NOT EXISTS sells (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, shares INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"shares\"))\n money = bro[0][\"cash\"]\n money = money + cost\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"INSERT INTO sells(user_id, symbol, name, price, shares, cost, time) VALUES (:user_id, :symbol, :name, :price, :shares, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], shares = shares, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"SOLD\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = shares, cost = cost, time = datetime.datetime.now())\n\n db.execute(\"UPDATE ind SET nos = ? WHERE symbol = ? AND user_id = ?\", int(hav[0][\"nos\"]) - shares, request.form.get(\"symbol\"), session[\"user_id\"])\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) == 0:\n db.execute(\"DELETE FROM ind WHERE symbol = ? 
AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n return redirect(\"/\")\n\n else:\n stocks = db.execute(\"SELECT * FROM ind WHERE user_id = ?\", session[\"user_id\"])\n\n return render_template(\"sell.html\", stocks = stocks)", "def sell():\n\n table = db.execute(\"SELECT symbol FROM portfolio WHERE id=:id\", id=session[\"user_id\"])\n symbols = []\n for i in range(len(table)):\n symbols.append(table[i][\"symbol\"])\n\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = int(db.execute(\"SELECT shares FROM portfolio where id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"shares\"])\n\n if owned_shares < int(request.form.get(\"shares\")):\n return apology(\"Too many shares\", 400)\n\n updated_shares = owned_shares - int(request.form.get(\"shares\"))\n\n # update shares in portfolio\n if updated_shares > 0:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE id=:id AND symbol=:symbol\",\n shares=updated_shares, id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n else:\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n # update cash in database\n quote = lookup(request.form.get(\"symbol\"))\n amount = quote[\"price\"] * float(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash = cash + :amount WHERE id=:id\", amount=amount, id=session[\"user_id\"])\n\n db.execute(\"INSERT INTO histories (symbol, shares, price, id) VALUES(:symbol, :shares, :price, :id)\",\n symbol=quote[\"symbol\"], shares=0-int(request.form.get(\"shares\")), price=usd(quote[\"price\"]), id=session[\"user_id\"])\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n return render_template(\"sell.html\", symbols=symbols)", "def field_buy(self, symbol):\r\n\r\n end_percent = 150\r\n current_price = 15#self.get_price()\r\n self.log(current_price)\r\n buys = {}\r\n new_price = current_price * 1.05\r\n while (new_price / current_price) > 150:\r\n self.log(\"New sell at: {}\".format(new_price))\r\n new_price *= 1.05\r\n\r\n self.log(buys)\r\n\r\n return buys", "def buy_stock(self, stock, amount, date=None):\n if date is None:\n date = self.date\n\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n self.order_stock(stock, self.stock_data[stock].position['Position'][date] + amount, date)", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not lookup(request.form.get(\"symbol\")):\n return apology(\"must provide valid symbol\",400)\n if not request.form.get(\"shares\") or int(request.form.get(\"shares\")) <= 0:\n return apology(\"shares must be positive integer!\",400)\n row=db.execute(\"SELECT * FROM users WHERE id=:s\",s=session[\"user_id\"])\n dict=lookup(request.form.get(\"symbol\"))\n cost=dict[\"price\"]* int(request.form.get(\"shares\"))\n if row[0][\"cash\"]>cost:\n db.execute(\"INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)\",s=dict[\"symbol\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=time.asctime( time.localtime(time.time())),u_i=session[\"user_id\"],status='bought')\n row[0][\"cash\"]=row[0][\"cash\"]-cost\n db.execute(\"UPDATE users SET 
cash = :cash WHERE id=:s\",cash=row[0][\"cash\"],s=session[\"user_id\"])\n exist=db.execute(\"SELECT * FROM portofolio WHERE symbol=:s AND user_id=:u_i\",s=dict[\"symbol\"],u_i=session[\"user_id\"])\n if len(exist) == 0 :\n db.execute(\"INSERT INTO portofolio(symbol,name,shares,price,total,user_id) VALUES (:s,:n,:sh,:p,:t,:u_i)\",s=dict[\"symbol\"],n=dict[\"name\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=cost,u_i=session[\"user_id\"])\n else:\n db.execute(\"UPDATE portofolio SET shares =shares+:sh, price=:p, total=total+:t WHERE symbol=:s AND user_id=:u_i\",sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=dict[\"price\"] * int(request.form.get(\"shares\")),s=dict[\"symbol\"],u_i=session[\"user_id\"])\n else:\n return apology(\"Can't afford!\",400)\n\n\n\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = request.form.get('shares')\n\n if not symbol or not shares or symbol == \"Select Stock\":\n return apology(\"Please input a valid symbol and number of shares\")\n elif int(shares) <= 0:\n return apology(\"Please input a positive number for shares\")\n else:\n symbol = symbol.lower()\n shares = int(shares)\n get_cur_shares = db.execute(\n \"SELECT SUM(shares) FROM History WHERE id = :id AND symbol = :symbol GROUP BY symbol\", id=session['user_id'], symbol=symbol)\n try:\n cur_shares = [share['SUM(shares)'] for share in get_cur_shares][0]\n except IndexError:\n return apology(\"Please input a valid number of shares\")\n if shares > cur_shares:\n return apology(\"Sorry, you don't have enough shares to sell\")\n else:\n cur_price = float(lookup(symbol)['price'])\n sell_val = cur_price * float(shares)\n sell_val = float(sell_val)\n get_bal = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n balance = [bal['cash'] for bal in get_bal][0]\n balance = float(balance)\n new_balance = balance + sell_val\n company = lookup(symbol)['name']\n new_database_balance = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",\n cash=new_balance, id=session['user_id'])\n new_database_transaction = db.execute(\"INSERT INTO History ('symbol', 'company', 'shares', 'price', 'totalprice', 'id', 'transaction_type') VALUES (:symbol, :company, :shares, :price, :totalprice, :id, :transaction_type)\",\n symbol=symbol, company=company, shares=-shares, price=cur_price,\n totalprice=sell_val, id=session['user_id'], transaction_type=\"SELL\")\n return redirect(\"/\")\n else:\n get_symbols = db.execute(\n \"SELECT symbol FROM History WHERE id = :id GROUP BY symbol HAVING SUM(shares) > 0\", id=session['user_id'])\n if not get_symbols:\n return apology(\"Sorry, could not find valid symbol\")\n else:\n symbols = [symbol['symbol'] for symbol in get_symbols]\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"Symbol\")\n if symbol is None:\n return apology(\"Enter a symbol\", 403)\n shares = request.form.get(\"Shares\")\n if int(shares) < 0:\n return apology(\"Please enter postive shares\", 403)\n\n stock = lookup(symbol)\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n for row in rows:\n if row[\"Symbol\"] == symbol:\n if int(shares) > row[\"totalShares\"]:\n return apology(\"Too many shares\")\n\n rows = db.execute(\"SELECT Cash FROM cash WHERE id=:id\", 
id=session[\"user_id\"])\n cash = rows[0][\"Cash\"]\n\n current_cash = cash + int(shares)*stock[\"price\"]\n db.execute(\"UPDATE cash SET Cash=:current_cash WHERE id=:id\", current_cash = current_cash, id=session[\"user_id\"])\n db.execute(\"INSERT INTO cash (id, Symbol, Name, Shares) VALUES (:id, :Symbol, :Name, :Shares)\", id=session[\"user_id\"], Symbol=stock[\"symbol\"], Name=stock[\"name\"], Shares=-1*int(shares))\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n rows = db.execute(\"SELECT Symbol FROM cash WHERE id=:id GROUP BY Symbol HAVING SUM(Shares) > 0\", id=session[\"user_id\"])\n # Shorthand for obtaining the symbol for every row in rows. So would output AAPL e.g.\n return render_template(\"sell.html\", symbols=[ row[\"Symbol\"] for row in rows ])", "def buy():\n\n if request.method == \"POST\":\n numShares = 0\n try:\n numShares = float(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Enter a numerical value!\", 400)\n if numShares % 1 != 0:\n return apology(\"Fractional Shares not allowed!\", 400)\n if numShares <= 0:\n return apology(\"Enter a number greater than 0!\", 400)\n if not request.form.get(\"symbol\"):\n return apology(\"Enter a symbol!\", 400)\n if not request.form.get(\"shares\"):\n return apology(\"Enter a number of shares!\", 400)\n\n company = lookup(request.form.get(\"symbol\"))\n if not company:\n return apology(\"Invalid ticker symbol\", 400)\n price = float(company[\"price\"])\n total = float(price * numShares)\n symbl = company[\"symbol\"]\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n remainingCash = float(userRows[0][\"cash\"])\n if total > remainingCash:\n return apology(\"You cannot afford the stock(s)!\", 400)\n else:\n currentUser = session.get(\"user_id\")\n purchased = db.execute(\"INSERT INTO portfolio (UserID, Symbol, Company, NumberOfShares, UnitPrice, TotalPrice) VALUES(:userid, :symbol, :name, :shares, :unitPrice, :totalPrice)\", userid=currentUser, symbol=symbl, name=company['name'], shares=numShares, unitPrice=price, totalPrice=total)\n\n\n if not purchased:\n return apology(\"Unable to purchase\", 400)\n else:\n remainingCash = remainingCash - total\n db.execute(\"UPDATE users set cash=:balance WHERE id=:userid\", balance=remainingCash, userid=currentUser)\n '''Update history'''\n dateNow = datetime.datetime.now()\n historized = db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbol, :shares, :price, :date, :userid)\", symbol = symbl, shares = numShares, price = total, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n\n else:\n return render_template(\"buy.html\")", "def buy():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure stock symbol and share validity\n if lookup(request.form.get(\"symbol\")) == None:\n return apology(\"invalid stock symbol\", 403)\n elif int(request.form.get(\"shares\")) < 1:\n return apology(\"must purchase at least one stock\", 403)\n\n # Compute the value of the purchase\n price = lookup(request.form.get(\"symbol\"))[\"price\"]\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])[0][\"cash\"]\n total = price * int(request.form.get(\"shares\"))\n\n # Ensure the user has enough cash to pay for the stocks\n if total > cash:\n return apology(\"not enough cash to purchase\", 403)\n\n # Insert into database that is used to retrieve history\n 
db.execute(\"INSERT INTO purchase (id, symbol, shares, price, created_at) VALUES(:id,:symbol,:shares,:value, datetime('now'))\", id=session[\"user_id\"], symbol=request.form.get(\"symbol\"), shares=request.form.get(\"shares\"), value=price)\n\n # Insert into database that is used for the index page\n number = db.execute(\"SELECT COUNT(*) FROM purchase WHERE id=:id AND symbol=:symbol\", id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"COUNT(*)\"]\n\n # Insert into database if the current stock has not been purchased before\n if number == 1:\n db.execute(\"INSERT INTO summary (id, symbol, shares, price) VALUES(:id,:symbol,:shares,:value)\", id=session[\"user_id\"], symbol=request.form.get(\"symbol\"), shares=request.form.get(\"shares\"), value=price)\n\n # Update database if the stock has been purchased before\n else:\n share = db.execute(\"SELECT SUM(shares) FROM purchase WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"SUM(shares)\"]\n db.execute(\"UPDATE summary SET shares= :shares WHERE (id = :username AND symbol= :symbol)\", shares=share, username=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n db.execute(\"UPDATE users SET cash = :new\", new = cash - total)\n\n # Redirect users to the index page\n return redirect(\"/\")\n\n # User reached route via GET (as by submitting a form via GET)\n else:\n return render_template(\"buy.html\")", "def buy():\n username = session.get(\"username\")\n # print(f'username: {username}')\n\n if request.method==\"POST\":\n symbol = request.form.get(\"symbol\")\n quantity = request.form.get(\"shares\")\n if not quantity.isdigit() or int(quantity)<=0:\n return apology(\"Quantity must be a positive integer\", 400)\n quantity = int(quantity)\n price = 0\n message = \"\"\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n response = lookup(symbol)\n if not response:\n return apology(\"Invalid symbol\", 400)\n\n price = response[\"price\"]\n name = response[\"name\"]\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n cost = price * float(quantity)\n status = \"bought\"\n if cash >= cost:\n cash -= cost\n db.execute(\"UPDATE users SET cash=:cash WHERE username=:username\", cash=cash, username=username)\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :stock_symbol, :unit_price, :time, :quantity, :name, :status)\",\n username = username, stock_symbol=symbol, unit_price=price, time=time, quantity=quantity, name=name, status=status)\n message = f'Recorded purchase {quantity} share(s) of {name} for total of {usd(cost)}, your remaining cash is {usd(cash)}'\n return render_template(\"buy.html\", message=message)\n else:\n return apology(\"Not enough cash\", 400)\n else:\n return render_template(\"buy.html\")", "def buy(self, date_idx: int, cash_balance: float, buy_budget: float) -> float:\n todays_price: float = float(self.price_history.iat[date_idx, 1])\n bought_shares: float = buy_budget / todays_price\n self.shares = bought_shares\n new_cash_balance: float = cash_balance - buy_budget\n self.last_bought_at_price = todays_price\n if Helpers.is_verbose_on():\n print(f\"{self.ticker}: buy {self.shares:.2f} shares at {todays_price:.2f} \"\n f\"for ${buy_budget:.2f} on date {date_idx}. 
Cash balance: {new_cash_balance:.2f}\")\n return new_cash_balance", "def sell():\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n tick = request.form.get(\"ticker\")\n quote = lookup(tick)\n if not quote:\n return apology(\"Ticker does not exist\")\n shares = int(request.form.get(\"shares\"))\n if shares <= 0:\n return apology(\"Please input a valid number of shares\")\n money = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n #if shares < int(money[0][\"shares\"]):\n # return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", id=session[\"user_id\"], purchase=(quote[\"price\"] * float(shares)))\n findshares = db.execute(\"SELECT shares FROM purchases WHERE user_id = :id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"])\n \n \n if not findshares:\n return apology(\"You don\"t have those shares >:(\")\n else:\n if int(findshares[0][\"shares\"]) < int(shares):\n return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE purchases SET shares=:number, total=:total WHERE user_id=:id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"], total=(float(quote[\"price\"])*float(shares)), number=int(findshares[0][\"shares\"]) - int(shares))\n return redirect(url_for(\"index\"))\n\nif __name__ == \"__main__\":", "def sell():\n if request.method == \"GET\":\n rows = db.execute(text(\n \"SELECT symbol, sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id GROUP BY symbol\"),\n id=session[\"user_id\"])\n symbols = [row[\"symbol\"] for row in rows if row[\"shares\"]]\n return render_template(\"sell.html\", symbols=symbols,\n symbol=request.args.get(\"symbol\"))\n\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = db.execute(text(\n \"SELECT sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id AND symbol=:symbol\"),\n id=session[\"user_id\"],\n symbol=request.form.get(\"symbol\")).fetchone()[\"shares\"]\n requested_shares = int(request.form.get(\"shares\"))\n if requested_shares > owned_shares:\n return apology(\"too many shares\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n db.execute(text(\n \"INSERT INTO transactions (user_id, symbol, shares, price) \"\n \"VALUES (:u, :sy, :sh, :p)\"),\n u=session[\"user_id\"],\n sy=request.form.get(\"symbol\"),\n sh=-requested_shares,\n p=quote[\"price\"])\n sell_price = int(request.form.get(\"shares\")) * quote[\"price\"]\n db.execute(text(\"UPDATE users SET cash=cash+:c WHERE id=:id\"),\n c=sell_price,\n id=session[\"user_id\"])\n flash(\"Sold!\")\n return redirect(\"/\")", "def sell():\n \n user_id = session[\"user_id\"]\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n \n if request.method == \"POST\":\n \n # get required symbol\n symbol = request.form.get(\"symbol\").upper()\n try:\n qty = int(request.form.get(\"qty\"))\n except ValueError:\n return apology(\"QTY is empty!\", 403)\n \n # proceed buy function\n sell_result: Tuple[float, str] = sell_shares(db, user_id, symbol, qty )\n if sell_result[0] == -1:\n return apology(sell_result[1], 403)\n\n return redirect(\"/\")", "def buy_to_open(self, symbol, date, price):\n\n # Figure out how much we are willing to spend\n cash_available = self.cash - self.trade_fee\n cash_to_spend = cash_available / self.free_position_slots\n \n # Calculate 
buy_price and number of shares. Fractional shares allowed.\n purchase_price = (1 + self.percent_slippage) * price\n shares = cash_to_spend / purchase_price\n\n # Spend the cash\n self.cash -= cash_to_spend + self.trade_fee\n assert self.cash >= 0, 'Spent cash you do not have.'\n self.portfolio_history.record_cash(date, self.cash) \n\n # Record the position\n positions_by_symbol = self.active_positions_by_symbol\n assert not symbol in positions_by_symbol, 'Symbol already in portfolio.' \n position = Position(symbol, date, purchase_price, shares)\n positions_by_symbol[symbol] = position", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\", symbol=request.args.get(\"symbol\"))\n\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n quote = lookup(request.form.get(\"symbol\"))\n if not quote:\n return apology(\"invalid symbol\", 400)\n\n cash = db.execute(text(\"SELECT * FROM users WHERE id = :id\"),\n id=session[\"user_id\"]).fetchone()[\"cash\"]\n purchase_price = int(request.form.get(\"shares\")) * quote[\"price\"]\n # Cast decimal.Decimal (from Postgres numeric) to float.\n if purchase_price > float(cash):\n return apology(\"can't afford\", 400)\n\n db.execute(text(\n \"INSERT INTO transactions (user_id, symbol, shares, price) \"\n \"VALUES (:u, :sy, :sh, :p)\"),\n u=session[\"user_id\"],\n sy=request.form.get(\"symbol\"),\n sh=request.form.get(\"shares\"),\n p=quote[\"price\"])\n db.execute(text(\"UPDATE users SET cash=cash-:c WHERE id=:id\"),\n c=purchase_price,\n id=session[\"user_id\"])\n flash(\"Bought!\")\n return redirect(\"/\")", "def buy():\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n # User reached route via POST (as by submitting a form via POST)\n shares = int(request.form.get(\"shares\"))\n symbol = request.form.get(\"symbol\")\n quote = lookup(symbol)\n\n if not quote:\n return apology(\"invalid symbol\", 404)\n\n price = quote['price']\n value = round(shares * price, 2)\n user = Users.query.get(session.get(\"user_id\"))\n\n if value > user.cash:\n return apology(\"You don't have enough cash\", 406)\n\n record = Records(symbol=quote['symbol'], company_name=quote['name'],\n transact_type=\"buy\", shares=shares, price=price, user_id=user.id)\n user.cash -= value\n db.session.add(record)\n db.session.commit()\n\n flash(\"Bought\")\n return redirect(url_for('index'))", "def buy(self, amount):\n trades = []\n buy_amount = 0\n precision = pow(10, self.pair.get_quote_token().get_decimals() - self.pair.get_base_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_SELL])):\n offer = self.book[Trade.WAY_SELL][i]\n amount_quote = offer.get_quote_amount()\n amount_base = offer.get_base_amount()\n price = offer.get_price()\n\n if amount_base >= amount:\n tmp = int(\"%d\" % (amount / price * precision))\n trade = Trade(self.pair, Trade.WAY_BUY, price, amount, tmp, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n buy_amount = buy_amount + trade.get_amount_quote()\n trades.append(trade)\n return trades, int(buy_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue with next offer.\n '''\n trade = Trade(self.pair, Trade.WAY_BUY, price, amount_base, amount_quote, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n buy_amount = buy_amount + trade.get_amount_quote()\n amount = amount - amount_base\n trades = trades + 
[trade]\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. Low volume or amount to high.\")", "def buy():\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n if request.method == \"POST\":\n #Access the form data\n symbol = request.form.get(\"symbol\")\n\n #Check if the shares was an integer\n try:\n quantity = int(request.form.get(\"shares\"))\n except:\n return apology (\"Please enter a whole number\", 400)\n\n\n if int(quantity) < 0:\n return apology (\"Please enter a positive value\", 400)\n\n #Lookup the stock symbol data (price, symbol, company name)\n stock = lookup(symbol)\n\n if not symbol:\n return apology (\"Invalid ticker symbol\", 400)\n\n if not stock:\n return apology (\"Invalid ticker symbol\", 400)\n\n stock_price = stock['price']\n\n #Get the current percent change of the stock\n changePercent = stock['changePercent']\n\n #Created a new table using CREATE TABLE 'portfolio' ('user' text, 'quantity' integer, 'price' numeric(15, 2), 'symbol' text)\n\n #Get the total cash value of the user from the database\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float\n check_cash = float(get_cash[0]['cash'])\n\n #Get the current date and time\n now = datetime.now()\n\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n if not stock:\n return apology (\"Please enter a valid stock\", 403)\n\n #Compute the total amount of the shares bought (One company stock only)\n total = stock_price * float(quantity)\n\n if total > check_cash:\n return apology(\"Not enough cash\", 403)\n\n #Check if the cash on hand is enough to purchase the order.\n if check_cash > total:\n #Update the total amount of cash in hand by subtracting the ordered stocks.\n db.execute(\"UPDATE users SET cash = cash - :total WHERE id = :id\", id=session[\"user_id\"], total=total)\n\n\n #Check if the total cash is enough for the stock purchase.\n if total < check_cash:\n #Query if the stock symbol is already in the portfolio.\n rows = db.execute(\"SELECT * FROM portfolio WHERE symbol = :symbol AND id = :id\", id=session[\"user_id\"], symbol=symbol)\n\n #Add the stock in the history table\n history = db.execute(\"INSERT INTO history (symbol, quantity, price, transacted, id) VALUES (?, ?, ?, ?, ?)\", symbol, int(quantity), float(stock_price), date_time, session[\"user_id\"] )\n\n #If the stock already exists in the portfolio. 
Update the quantity.\n if len(rows) == 1:\n db.execute(\"UPDATE portfolio SET quantity = quantity + :quantity, total = total + :total, stock_price = :stock_price WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol=symbol, quantity=quantity, total=total, stock_price = float(stock_price))\n flash('You successfuly bought the stock')\n else:\n #Insert the user, shares bought, shares price, and the quantity bought in portfolio table.\n db.execute(\"INSERT INTO portfolio (quantity, total, symbol, id, stock_price, name, percent_change) VALUES (?, ?, ?, ?, ?, ?, ?)\", int(quantity), total, symbol, session['user_id'], float(stock_price), stock['name'], changePercent)\n flash('You successfully bought the stock!')\n\n #return redirect (url_for('index'))\n return render_template(\"buy.html\")", "async def trade(self, ctx, sell_amount : float, sell_symbol, \n buy_amount : float, buy_symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(sell_amount, sell_symbol)\n portfolio.Buy(buy_amount, buy_symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (user, portfolio.Value()))\n portfolio.Save()", "async def stocks(self, ctx):\n\t\tpass", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n elif request.method == \"POST\":\n shares = request.form.get(\"shares\")\n symbol = request.form.get(\"symbol\")\n try:\n float(shares)\n except ValueError:\n return apology(\"please input a valid number of shares\")\n try:\n int(shares)\n except ValueError:\n return apology(\"please input a valid number of shares\")\n shares = int(shares)\n\n if not shares or not float(shares) or not float(shares).is_integer() or float(shares) <= 0:\n return apology(\"input a valid number of shares to buy\")\n\n elif not symbol or not lookup(symbol):\n return apology(\"input a valid symbol\")\n\n elif type(shares) != int:\n return apology(\"How did you even get this error?!\")\n\n else:\n quote = lookup(symbol)\n current_price = float(quote[\"price\"])\n company = quote[\"name\"]\n shares_num = int(request.form.get(\"shares\"))\n shares_tcost = float(shares_num * current_price)\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n # balance[0] b/c the returned value of balance is a dict of multiple lists\n flbal = [float(i) for i in list(balance[0].values())]\n for bal in flbal:\n if bal - shares_tcost < 0:\n return apology(\"Sorry, you don't have enough money\")\n else:\n newshares = bal - shares_tcost\n newbalance = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=newshares, id=session['user_id'])\n newpurchase = db.execute(\"INSERT INTO History ('symbol', 'company', 'shares', 'price', 'totalprice', 'id', 'transaction_type') VALUES (:symbol, :company, :shares, :price, :totalprice, :id, :transaction_type)\",\n symbol=symbol, company=company, shares=shares_num, price=current_price, totalprice=shares_tcost, id=session['user_id'], transaction_type=\"BUY\")\n\n return redirect('/')", "def sell():\n\n if request.method == \"POST\":\n sellstock = request.form.get(\"symbol\")\n sellq = int(request.form.get(\"shares\"))\n if sellstock == None:\n return apology(\"Please select a stock symbol to sell.\")\n if sellq < 0:\n return apology(\"Please enter a valid quantity of stocks to sell\")\n invq = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":sellstock})[0][\"quantity\"]\n 
if sellq > invq:\n return apology(\"You don't have enough shares.\")\n stock = lookup(sellstock)\n cost = round(sellq*stock[\"price\"], 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":sellstock,\"va\":stock[\"price\"],\"qu\":sellq,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":(invq-sellq),\"uid\":session[\"user_id\"],\"sy\":sellstock})\n db.execute(\"UPDATE users SET cash = cash + :cash WHERE id =:uid\", {\"cash\":cost,\"uid\":session[\"user_id\"]})\n flash(\"Shares successfully sold!\")\n return redirect(\"/\")\n inventory = db.execute(\"SELECT symbol FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n return render_template(\"sell.html\", context = inventory)", "def buy():\n if request.method == \"POST\":\n\n # Ensure buy order\n if not request.form.get(\"symbol\"):\n return apology(\"must provide valid order info\", 400)\n\n # Ensure buy order\n elif not request.form.get(\"shares\"):\n return apology(\"must provide valid order info\", 400)\n\n # Ensure stock is balid else display an apology\n elif lookup(request.form.get(\"symbol\")) == None:\n return apology(\"invalid stock\", 400)\n\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"shares must be a positive integer\", 400)\n\n\n # Check if its negative\n #elif int(request.form.get(\"shares\")) < 1:\n # return apology(\"must provide valid order info\", 400)\n\n\n # Add stock to user's portfolio\n\n stock = lookup(request.form.get(\"symbol\"))['name']\n num = request.form.get(\"shares\")\n price = (lookup(request.form.get(\"symbol\"))['price'])\n user = session.get(\"user_id\")\n amount = (float(request.form.get(\"shares\")) * float(lookup(request.form.get(\"symbol\"))['price']))\n\n # check if they have enough cash\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE id = :id\", id = session.get(\"user_id\"))\n rows = float(rows[0][\"cash\"])\n\n\n # Add trasnaction to portfolio if user has enough cash\n if (float(num) * float(price)) <= rows:\n result = db.execute(\"INSERT INTO portfolio (User, Stock, Price, Num) VALUES(:User, :Stock, :Price, :Num)\", User = session.get(\"user_id\"), Stock = stock, Price = usd(price), Num = num)\n if not result:\n return apology(\"TX did not recrod\", 400)\n# Update cash\n result = db.execute(\"UPDATE users set cash = cash - :amount where id = :User \", User = session.get(\"user_id\"), amount = amount)\n if not result:\n return apology(\"Cash did not update\", 400)\n\n # Redirect user to home page\n return redirect(\"/\")\n else:\n\n return apology(\"Not enough Cash\", 403)\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which stock to buy\", 403)\n if not request.form.get(\"nos\"):\n return apology(\"Please specify how many stocks you want to buy\", 403)\n if int(request.form.get(\"nos\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"nos\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n symbol = request.form.get(\"symbol\")\n if not lookup(symbol):\n return apology(\"Invalid symbol\", 403)\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"nos\"))\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", 
session[\"user_id\"])\n money = bro[0][\"cash\"]\n if cost > money:\n return apology(\"Cannot afford\", 400)\n money = money - cost\n bef = db.execute(\"SELECT COUNT (?) FROM ind WHERE user_id = ?\", lookup(symbol)[\"symbol\"], session[\"user_id\"])\n if len(bef):\n tot = 0\n nob = 0\n tota = cost\n\n else:\n tot = db.execute(\"SELECT total FROM ind where symbol = ?\", lookup(symbol)[\"symbol\"])\n no = db.execute(\"SELECT nos FROM ind where symbol = ?\", lookup(symbol)[\"symbol\"])\n nob = no[0][\"nos\"]\n tota = tot[0][\"total\"] - cost\n\n\n\n\n nos = int(request.form.get(\"nos\"))\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"CREATE TABLE IF NOT EXISTS buys (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, nos INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"BOUGHT\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = nos, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO buys(user_id, symbol, name, price, nos, cost, time) VALUES (:user_id, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = nos, cost = cost, time = datetime.datetime.now())\n bef = db.execute(\"SELECT symbol FROM ind WHERE symbol=:symbol AND user_id=:id\", symbol=lookup(symbol)[\"symbol\"], id=session[\"user_id\"])\n\n # add to portfolio database\n # if symbol is new, add to portfolio\n if not bef:\n db.execute(\"INSERT INTO ind (symbol, name, nos, user_id, price, total) VALUES (:symbol, :name, :nos, :id, :price, :total)\",\n name = lookup(symbol)[\"name\"], symbol=lookup(symbol)[\"symbol\"], nos=int(request.form.get(\"nos\")), id = session[\"user_id\"], price = lookup(symbol)[\"price\"], total = cost)\n\n # if symbol is already in portfolio, update quantity of shares and total\n else:\n db.execute(\"UPDATE ind SET nos=nos+:nos WHERE symbol=:symbol AND user_id=:id\",\n nos=int(request.form.get(\"nos\")), symbol=lookup(symbol)[\"symbol\"], id = session[\"user_id\"]);\n return redirect(\"/\")\n\n\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n price = lookup(symbol)['price']\n \n if not request.form.get('ammount').isnumeric() or int(request.form.get('ammount')) % 100 != 0:\n return apology(\"The ammount is not a valid number, should be a multiple of 100\", 501)\n\n ammount = int(request.form.get('ammount'))\n cost = price * ammount\n\n current_stock = db.execute(\"SELECT * FROM stocks WHERE user_id = ? 
AND symbol = ?\", session[\"user_id\"], symbol)\n current_cash = db.execute(\"SELECT * FROM users WHERE id = ?\", session[\"user_id\"])\n \n if cost > current_cash[0][\"cash\"]:\n return apology(\"Not enough money\", 999)\n else:\n update_database(session[\"user_id\"], symbol, ammount, price, \"buy\", current_stock, current_cash[0])\n \n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n sharesToSell = int(request.form.get(\"shares\"))\n if sharesToSell < 0:\n return apology(\"Shares to sell cannot be negative\", 400)\n\n sharesRows = db.execute(\"SELECT * FROM portfolio WHERE UserID = :userid AND Symbol = :enteredSymbol\",\n userid=session.get(\"user_id\"), enteredSymbol = symbol)\n\n numSharesOwned = 0\n for row in sharesRows:\n numSharesOwned += row[\"NumberOfShares\"]\n\n if numSharesOwned < sharesToSell:\n return apology(\"You don't own that many shares!\", 400)\n\n remainingSharesToSell = sharesToSell\n for row in sharesRows:\n numShares = row[\"NumberOfShares\"]\n if remainingSharesToSell >= numShares:\n '''delete row'''\n delete = db.execute(\"DELETE FROM portfolio WHERE id = :rowid\", rowid = row[\"id\"])\n remainingSharesToSell -= numShares\n else:\n '''update row'''\n updatedShares = numShares - remainingSharesToSell\n update = db.execute(\"UPDATE portfolio SET NumberOfShares = :numshares, TotalPrice = :tp WHERE id = :rowid\",\n numshares = updatedShares, tp = updatedShares * row[\"UnitPrice\"], rowid = row[\"id\"])\n remainingSharesToSell = 0\n\n if remainingSharesToSell == 0:\n break;\n\n quote = lookup(symbol)\n cashToReturn = quote[\"price\"] * sharesToSell\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session.get(\"user_id\"))\n usersCurrentCash = userRows[0][\"cash\"]\n\n updatedBalance = usersCurrentCash + cashToReturn\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash = updatedBalance, userid = session.get(\"user_id\"))\n '''Update history'''\n dateNow = datetime.datetime.now()\n db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbl, :shares, :price, :date, :userid)\", symbl = symbol, shares = -1 * sharesToSell, price = -1 * cashToReturn, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n else:\n symbolRows = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n symbls = []\n for row in symbolRows:\n symbls.append(row[\"Symbol\"])\n\n return render_template(\"sell.html\", symbols=symbls)", "def attempt_market_buy(self, decision: Decision, available_capital: float, state: Series, is_backtest: bool = False, crypto: bool = False) -> Transaction:\n try: latest_price = self.latest_price(decision.symbol, state, is_backtest, crypto)\n except:\n print('Error retrieving latest price')\n return Transaction(False, TransactionType.MarketBuy, 0, 0, decision, state['date'])\n\n # Determine how many shares we can/should purchase given a decision\n share_quantity = 0\n if isinstance(decision.quantity, BuyQuantity):\n diviser = 1 if decision.quantity == BuyQuantity.Max else 2\n share_quantity = ((available_capital * .9) / diviser) / latest_price\n else:\n max_purchase_quantity = (available_capital * .9) / latest_price\n if not self.allow_fractional and not crypto: max_purchase_quantity = math.floor(max_purchase_quantity)\n\n if decision.quantity < max_purchase_quantity: # 
decision is valid quantity\n share_quantity = decision.quantity\n else: # Can't afford requested amount, instead buy as much as possible\n share_quantity = max_purchase_quantity\n\n if not self.allow_fractional and not crypto:\n try: share_quantity = math.floor(share_quantity)\n except: print('Error getting share quantity:', share_quantity, decision.quantity, available_capital, self.latest_price(decision.symbol, state, is_backtest, crypto))\n\n strike_price: float\n succeeded = True\n \n if share_quantity == 0 or (not self.allow_fractional and not crypto and share_quantity < 0):\n print('share_quantity=0 error - returning')\n strike_price = 0\n share_quantity = 0\n succeeded = False\n elif is_backtest:\n c_type = 'crypto' if crypto else 'stock'\n # spread = .01 if c_type == 'stock' else 0\n spread = 0\n buy_fee = state['close'] * self.get_fee_pct(c_type)[0] + self.get_fixed_fee(c_type, state[\"close\"], share_quantity)\n self.total_fees += buy_fee\n self.trade_volume_shares += share_quantity\n print(f'unadjusted price: {state[\"close\"]} | fee: {buy_fee} | trade volume: {self.trade_volume} | total fees: {self.total_fees}')\n strike_price = state['close'] + buy_fee + spread\n else:\n try:\n if crypto:\n try:\n print('attempting crypto market buy @ ', latest_price)\n res = asyncio.get_event_loop().run_until_complete(wait_for_cb_order_fill(self.cb_client, decision.contract, 'buy', share_quantity, latest_price))\n (strike_price, share_quantity, succeeded) = res\n except Exception as e:\n print('asnycio wait_for_cb_order_fill error:', e)\n strike_price = 0\n succeeded = False\n else:\n print(f'attempting {decision.symbol} ib market buy @ {latest_price}')\n # buy_order = MarketOrder('BUY', share_quantity)\n buy_order = LimitOrder('BUY', share_quantity, latest_price)\n res = asyncio.get_event_loop().run_until_complete(wait_for_ib_order_fill(self.ib_client.ib, buy_order, decision.contract))\n \n print('market buy res:', res)\n (strike_price, share_quantity, succeeded) = res\n\n except Exception as e: # Failed to purchase at limit price\n print('market buy error:', e)\n succeeded = False\n strike_price = 0\n share_quantity = 0\n\n self.trade_volume += (strike_price * share_quantity)\n return Transaction(succeeded, TransactionType.MarketBuy, strike_price, share_quantity, decision, state['date'])", "def trade_action(self, BUY_QTY):\n BUY_QTY = 4500\n self.trade(BUY_QTY)\n #self.show()", "def sell():\n userId = session[\"user_id\"]\n\n sharesOwned = db.execute(f\"SELECT symbol, SUM(shares) FROM transactions WHERE user_id={userId} GROUP BY symbol HAVING SUM(shares)>0\")\n\n if request.method == \"GET\":\n\n return render_template(\"sell.html\", sharesOwned=sharesOwned)\n\n elif request.method == \"POST\":\n\n symbolInput = request.form.get(\"symbol\")\n shares = float(request.form.get(\"shares\")) * (-1)\n\n symbolName = lookup(symbolInput)[\"name\"]\n symbolPrice = lookup(symbolInput)[\"price\"]\n symbolTicker = lookup(symbolInput)[\"symbol\"]\n\n shareCount = float(db.execute(f\"SELECT SUM(shares) FROM transactions WHERE user_id={userId} AND symbol='{symbolInput}' GROUP BY symbol HAVING SUM(shares)>0\")[0][\"SUM(shares)\"] * (-1))\n\n if symbolInput != symbolTicker or symbolInput == \"\" or shares == \"\" or shares > 0 or shares < shareCount:\n return apology(\"No sell for you senpai!\")\n\n else:\n totalPrice = shares * symbolPrice\n availableCash = float(db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"])\n\n now = datetime.now()\n transTime = now.strftime(\"%d/%m/%Y 
%H:%M:%S\")\n availableCash -= totalPrice\n\n db.execute(f\"UPDATE users SET cash = '{availableCash}' WHERE id = '{userId}'\")\n\n db.execute(f\"INSERT INTO transactions (trans_time, trans_type, user_id, symbol, price, shares, value, name, current_price) VALUES ('{transTime}','SELL','{userId}','{symbolTicker}','{symbolPrice}','{shares}','{totalPrice}','{symbolName}','{symbolPrice}')\")\n\n return redirect(\"/\")", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n elif request.method == \"POST\":\n\n symbolInput = request.form.get(\"symbol\")\n shares = float(request.form.get(\"shares\"))\n\n symbolName = lookup(symbolInput)[\"name\"]\n symbolPrice = lookup(symbolInput)[\"price\"]\n symbolTicker = lookup(symbolInput)[\"symbol\"]\n\n if symbolInput != symbolTicker or symbolInput == \"\" or shares == \"\" or shares < 1:\n return apology(\"No buy for you senpai!\")\n\n else:\n userId = session[\"user_id\"]\n totalPrice = shares * symbolPrice\n availableCash = float(db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"])\n\n if totalPrice > availableCash:\n return apology(\"Not enough available tendies\")\n else:\n now = datetime.now()\n transTime = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n availableCash -= totalPrice\n\n db.execute(f\"UPDATE users SET cash = '{availableCash}' WHERE id = '{userId}'\")\n\n db.execute(f\"INSERT INTO transactions (trans_time, trans_type, user_id, symbol, price, shares, value, name, current_price) VALUES ('{transTime}','BUY','{userId}','{symbolTicker}','{symbolPrice}','{shares}','{totalPrice}','{symbolName}','{symbolPrice}')\")\n\n return redirect(\"/\")", "def sellshares():\n # Initialise buy and sell share forms\n sellform = SellShareForm()\n # Validate and process form data\n if(sellform.validate_on_submit()):\n # Buys shares\n issuerID = sellform.sellsharecode.data\n quantity = sellform.sellquantity.data\n userID = current_user.userID\n # Call buyshare API\n sellshare = gdb.sellshare(userID, issuerID, quantity)\n if(sellshare):\n # Flash with success message\n flash(\"Share sale successful!\", category=\"success\")\n else:\n # Flash with warning message\n flash(\"Share sale unsuccessful!\", category=\"error\")\n # Redirect to reffering page or dashboard\n return redirect(request.referrer or url_for('main.dashboard'))", "def buy():\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n total=int(quote[\"price\"])*int(shares)\n 
db.execute(\"UPDATE users SET cash=cash- (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")", "def buy():\n \n if request.method == \"POST\":\n \n time = str(datetime.now())\n \n quantity = int(request.form.get(\"quantity\"))\n \n if quantity < 1:\n return apology(\"you need to provide right quantity\")\n \n # get user's cash\n user_id = int(session.get('user_id'))\n \n data_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id = user_id)\n \n convert = data_cash[0]\n cash = convert.get('cash')\n \n # getting stock request data\n quote = session['quote']\n \n symbol, name, price = quote['symbol'], quote['name'], float(quote['price'])\n total = price * quantity\n \n #check if user can afford so much stock\n \n if total > cash:\n return apology('you don\\'t have enough money')\n \n #INSERT bought stock into history table\n db.execute('''INSERT INTO history (date, user_id, stock_name, symbol, quantity, price, deal) \n VALUES (:date, :user_id, :stock_name, :symbol, :quantity, :price, :deal)''',\n date = time,\n user_id = user_id,\n stock_name = name,\n symbol = symbol,\n quantity = quantity,\n price = total,\n deal = 'buy')\n #update portfolio\n #check if user has bought this stock before\n symbol_check = db.execute('''SELECT symbol FROM portfolio WHERE user_id = :user_id''',\n user_id = user_id)\n \n if [x for x in symbol_check if x['symbol'] == symbol]:\n #update stock if user has bought such shares before\n db.execute('''UPDATE portfolio \n SET quantity = quantity + :quantity \n WHERE (user_id = :user_id AND symbol = :symbol)''', \n quantity = quantity, user_id = user_id, symbol = symbol)\n \n else:\n #add new shares to portfolio\n db.execute('''INSERT INTO portfolio VALUES (:user_id, :symbol, :quantity)''',\n user_id = user_id, symbol = symbol, quantity = quantity)\n \n #update cash\n db.execute('UPDATE users SET cash = cash - :total WHERE id = :user_id', total = total, user_id = user_id)\n \n return redirect(url_for(\"index\"))\n \n else:\n return redirect(url_for(\"quote\"))", "def buy():\n\n # User reached route via POST\n if request.method == 'POST':\n\n # Ensure shares is a positive integer:\n try:\n if int(request.form.get('shares')) < 1:\n return apology(\"input isn't a positive integer\", 400)\n except ValueError:\n return apology(\"input isn't an integer\", 400)\n\n # Ensure symbol was provided\n if not request.form.get('symbol'):\n return apology('must provide symbol', 403)\n\n # Ensure symbol exists\n if lookup(request.form.get('symbol')) == None:\n return apology(\"symbol doens't exist\")\n\n shares = int(request.form.get('shares'))\n\n stock_price = lookup(request.form.get('symbol'))['price']\n\n cash = db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n # Check if the user can afford the stock\n if stock_price * shares > cash:\n return apology(f\"You don't have enough cash to buy {shares} shares.\", 403)\n\n db.execute('INSERT INTO transactions (id, operation, symbol, shares, price) VALUES(:id, :operation, :symbol, :shares, :stock_price)',\n id=session['user_id'],\n symbol=request.form.get('symbol').upper(),\n operation='BUY',\n shares=shares,\n stock_price=stock_price\n )\n\n db.execute('UPDATE users SET cash = :cash WHERE id = :id',\n cash=cash - shares * stock_price,\n id=session['user_id'])\n\n # Redirect user to home page\n return redirect('/')\n\n # User reached route via GET\n else:\n return render_template('buy.html')", "async def sell(self, 
ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' % \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def sell():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Access form data\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n # Ensure symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n if not shares:\n return apology(\"must provide shares\", 400)\n\n # Obtain quote using lookup function\n QUOTED = lookup(symbol)\n\n # Check if user has enough shares to sell as requested\n shares_count = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n if shares > shares_count:\n return apology(\"not enough shares owned\", 400)\n\n # User has enough shares to sell as requested\n else:\n # Calculate new cash amount user has\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n cash_gained = QUOTED[\"price\"] * shares\n new_cash_total = cash + cash_gained\n\n # Update cash in users table for user\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", new_cash_total, user_id)\n\n # Insert sell log into history table\n db.execute(\"INSERT INTO history (user_id, symbol, shares, price, transacted) VALUES (?, ?, ?, ?, datetime('now'))\",\n user_id, QUOTED[\"symbol\"], -(shares), QUOTED[\"price\"])\n\n # Keep track of shares in shares table\n current_shares = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n new_shares_total = current_shares - shares\n\n # If 0 shares left of the stock owned\n if new_shares_total == 0:\n db.execute(\"DELETE FROM shares WHERE user_id = ? AND symbol = ?\", user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User still owns shares of the stock\n else:\n shares_value_total = new_shares_total * QUOTED[\"price\"]\n db.execute(\"UPDATE shares SET shares_count = ?, price = ?, total = ? WHERE user_id = ? 
AND symbol = ?\",\n new_shares_total, QUOTED[\"price\"], shares_value_total, user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n # Select share symbols from shares table for logged in user\n SHARES = db.execute(\"SELECT symbol FROM shares WHERE user_id = ?\", user_id)\n\n return render_template(\"sell.html\", shares=SHARES)", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure the user inputs a symbol\n symbol = request.form.get(\"symbol\").upper()\n if not symbol:\n return apology(\"must provide a symbol\", 403)\n\n # ensure number of shares is submitted\n shares = request.form.get(\"shares\")\n if not shares:\n return apology(\"must provide number of shares\", 403)\n\n\n # do a try except for handling negative values or empty spaces in shares input box\n try:\n shares = int(shares)\n if shares < 0:\n return apology(\"Enter a positive integer for shares\", 403)\n except ValueError:\n return apology(\"No empty spaces allowed enter a positive integer\", 403)\n\n # call lookup in helpers.py to look up a stock’s current price.\n stockPriceDetail = lookup(symbol)\n\n # render apology for invalid symbol input by user\n if stockPriceDetail == None:\n return apology(\"Invalid symbol\", 403)\n else:\n price = stockPriceDetail[\"price\"]\n\n # calculate the total price of the number of shares\n totalCost = price * shares\n print(totalCost)\n\n\n # based on user's input check if they have enough cash to buy stocks\n rows = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n print(\"rows= \" , rows)\n\n cash = rows[0] [\"cash\"]\n\n # Check for sufficient cash\n if cash < totalCost:\n return apology(\"you have insufficient cash balance\", 403)\n\n balance = cash - totalCost\n\n # insert row in transactions table\n result = db.execute(\"\"\"insert into transactions\n (user_id,stock_code,stock_quantity,stock_price,\n start_balance,end_balance,transaction_type)\n values(:userid, :symbol, :shares, :price, :cash,\n :balance,:ttype)\"\"\",\n userid=session[\"user_id\"],shares=shares,\n symbol=symbol,price=price,\n cash=cash,balance=balance,ttype=\"BOUGHT\")\n\n # update users balance\n result = db.execute(\"update users set cash = :balance where id = :userid\",\n userid=session[\"user_id\"],balance=balance)\n\n # Redirect user to index page\n return redirect(\"/\")\n\n else:\n symbol = request.args.get('symbol')\n return render_template(\"buy.html\",symbol=symbol)", "def sell():\n\n # if user reached route via GET return them an input form\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n\n # if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n\n # get id as it is used many times\n id = session[\"user_id\"]\n\n # get symbol input\n symbol = request.form.get(\"symbol\")\n\n # get share volume requested\n volume = int(request.form.get(\"volume\"))\n\n # ensure stock symbol was submitted\n if not symbol:\n return apology(\"you must provide a stock symbol\")\n\n # ensure positive volume (integer rule handled elsewhere)\n elif volume <= 0:\n return apology(\"volume must be integer greater than 0\")\n\n # lookup stock on yahoo\n stock_info = lookup(symbol)\n\n # if error looking stock up\n if not stock_info:\n return apology(\"that stock symbol doesn't exist\")\n\n # check if user already owns any 
stock in this company\n existing = db.execute(\"SELECT num_shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=id, symbol=symbol)\n\n # if sufficient cash, make purchase, else return apology\n if not existing:\n return apology(\"you don't own this stock\")\n else:\n if existing[0]['num_shares'] < volume:\n return apology('you cannot sell more shares than you own')\n else:\n # query database for\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=id)\n cash = cash[0]['cash']\n\n minus = db.execute(\"UPDATE portfolio SET num_shares = :num_shares WHERE id = :id AND symbol = :symbol\", num_shares=existing[0]['num_shares'] - volume, id=id, symbol=symbol)\n\n # set date string\n dstring = str(datetime.datetime.utcnow())\n\n # update transaction history\n result2 = db.execute(\"INSERT INTO `transaction` (id, symbol, volume, share_price, dtstamp) VALUES(:id, :symbol, :volume, :share_price, :dtstamp)\", id=id, symbol=symbol, volume=-volume, share_price=stock_info['price'], dtstamp=dstring)\n\n # calculate sale price\n sale_price = stock_info['price'] * volume\n\n # increase cash balance\n result = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash+sale_price, id=id)\n\n # redirect user to home page\n return redirect(url_for(\"index\"))", "def buy():\n if request.method == \"POST\":\n\n #validate input\n try:\n shares = int(request.form.get(\"shares\"))\n stock = lookup(request.form.get(\"symbol\"))\n except:\n return apology(\"enter a valid ticker\")\n\n #check shares not blank\n if not stock:\n return apology(\"please enter a stock\")\n\n #are shares there and more than 0?\n if not shares or shares <= 0:\n return apology(\"Please fill in all fields\")\n\n #does the user have enough cash\n money = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n money = int(money[0]['cash'])\n if stock[\"price\"] * shares > money:\n return apology(\"You don't have enough money\")\n else:\n db.execute(\"INSERT INTO portfolio (stock, price, trans_price, number, userid) VALUES (:stock, :price, :trans_price, :number, :userid)\", stock=stock['symbol'], price=stock['price'], trans_price=usd(stock['price']), number=shares, userid=session[\"user_id\"])\n db.execute(\"UPDATE users SET cash=cash-:total WHERE id=:userid\", total=(stock['price'] * shares), userid=session[\"user_id\"])\n\n return redirect(\"/\")\n\n if request.method == \"GET\":\n return render_template(\"buy.html\")", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure positive number of shares was submitted\n elif not request.form.get(\"shares\") or int(request.form.get(\"shares\")) < 0:\n return apology(\"must provide positive number of shares\", 403)\n\n else:\n returned_quote = lookup(request.form.get(\"symbol\"))\n row = db.execute(\"SELECT * FROM users WHERE id = :user_id\", user_id = session[\"user_id\"])\n if returned_quote == None:\n return apology(\"symbol does not exist\", 403)\n\n elif returned_quote[\"price\"] * int(request.form.get(\"shares\")) > row[0][\"cash\"]:\n return apology(\"cannot afford number of shares at current price\", 403)\n\n else:\n db.execute(\"INSERT INTO 'transaction' ('t_id','u_id','symbol','shares','price') VALUES (NULL,:u_id,:symbol,:shares,:price)\",\n u_id = session[\"user_id\"], symbol = returned_quote[\"symbol\"], shares = int(request.form.get(\"shares\")), 
price = returned_quote[\"price\"])\n db.execute(\"UPDATE users SET cash = cash - :price * :shares WHERE id = :user_id\",\n price = returned_quote[\"price\"], shares = int(request.form.get(\"shares\")), user_id = session[\"user_id\"])\n\n flash(\"Bought\")\n return redirect(\"/\")\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "async def price(self, ctx, name):\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\treal = str(price)\n\t\treal = ('0' * (3 - max(len(real), 0))) + real\n\t\treal = '$' + real[:-2] + '.' + real[-2:]\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(f'**{name}:** {price} {currency} per share ({real}).')", "def buy():\n current_cash= db.execute(\"select cash from users where id = \" + str(session[\"user_id\"]))[0]['cash']\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n\n x=lookup(symbol)\n if x == None:\n return apology(\"invalid symbol\", 400)\n\n price = int(shares)*x['price']\n new_cash = current_cash - price\n\n\n #print(\"insert into users (cash) values (?)\", new_cash + \" where id = \"+ str(session[\"user_id\"]))\n\n db.execute(\"UPDATE users SET cash = \"+ str(new_cash) +\" WHERE id = \"+ str(session[\"user_id\"]) +\";\")\n db.execute(\"insert into purchases (user_id, shares, symbol, price_total, price_per_shares) values (?, ?, ?, ?,? )\", session[\"user_id\"], shares, symbol, price, x['price'])\n db.execute(\"insert into history (user_id, type, amount, time, shares, name) values (?,?,?,?,?,?)\",str(session[\"user_id\"]), \"buy\", str(price), str(datetime.now()), str(shares), symbol)\n return redirect(\"/\")\n\n return render_template(\"buy.html\")", "def transact_shares(self, action, quantity, price, commission, bid=None, ask=None):\n if bid is None: \n bid = price\n if ask is None:\n ask = price\n\n if action is None:\n return\n\n self.total_commission += commission\n\n # Adjust total bought and sold\n if action == \"BOT\":\n self.avg_bot = (self.avg_bot * self.buys + price * quantity) / (self.buys + quantity)\n\n if self.net < 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (self.avg_price - price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n # Increasing long position\n self.avg_price = (self.avg_price * self.net + price * quantity + commission) / (self.net + quantity)\n self.buys += quantity\n self.total_bot = self.buys * self.avg_bot\n\n # action == \"SLD\"\n else:\n self.avg_sld = (self.avg_sld * self.sells + price * quantity) / (self.sells + quantity)\n\n if self.net > 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (price - self.avg_price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n\n self.avg_price = (self.avg_price * self.net - price * quantity - commission) / (self.net - quantity)\n self.sells += quantity\n self.total_sld = self.sells * self.avg_sld\n\n # Adjust net values, including commissions\n self.net = self.buys - self.sells\n self.net_total = self.total_sld - self.total_bot\n self.net_incl_comm = self.net_total - self.total_commission\n self.cost_basis = self.net * 
self.avg_price\n\n self.update_market_value(bid, ask)", "def sell():\n\n # User submits information\n if request.method == \"POST\":\n\n # Ensure user entered a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must choose a stock\")\n\n # Get stock selected\n symbol = request.form.get(\"symbol\")\n \n # Ensure is a valid stock symbol\n if not lookup(symbol):\n return apology(\"Invalid stock symbol\")\n\n # Ensure user owns the stock requested\n test = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n if not test:\n return apology(\"you have 0 shares of this stock\")\n\n owns = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n # Ensure user entered a number in shares\n if not request.form.get(\"shares\") or not isinstance(request.form.get(\"shares\"), int):\n return apology(\"must enter postive whole number of shares\")\n\n shares = request.form.get(\"shares\")\n\n # Ensure number is positive\n if shares <= 0:\n return apology(\"must enter a positive number\")\n\n # Ensure user owns the amount of stock entered to sell\n if shares > owns[0]['shares']:\n return apology(\"you don't own that much of this stock\")\n\n # Get date and time for transaction\n day = datetime.now()\n time = datetime.now().time()\n\n # Get total and stock name for transaction\n price = lookup(symbol)['price']\n total = price * shares\n name = lookup(symbol)['name']\n\n # Sell shares of the stock and add to transactions history\n db.execute(\"INSERT INTO transactions (user_id, date, time, price, shares, total, stock, name, type) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n session[\"user_id\"], day, time, price, shares * -1, total, symbol, name, \"sell\")\n\n # Update portfolios table\n db.execute(\"UPDATE portfolios SET shares = shares - ? WHERE user_id = ? AND stocks = ?\", shares, session[\"user_id\"], symbol)\n\n # If stock shares is 0, delete from portfolio\n db.execute(\"DELETE FROM portfolios WHERE shares = ? \", 0)\n\n return redirect(\"/\")\n\n # If user reached page via link or redirect\n else:\n\n # Get list of stocks owned\n owns = db.execute(\"SELECT stocks FROM portfolios WHERE user_id = ? 
ORDER BY stocks\", session[\"user_id\"])\n\n return render_template(\"sell.html\", owns=owns)", "def buy(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.buy(symbol, quantity, in_force, extended)", "def Buy(self, X, Y):\n if self.money - (int(Y) * self.price[X][0] * (1 + self.taxe)) < 0:\n raise TradeError(\"Not Enough Money\")\n self.share[X] += int(Y)\n self.money -= int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"BUY:{str(int(Y))}:{str(X)}\", flush = True)", "def sell():\n\n if request.method == \"POST\":\n\n # define stock variables\n symbol = request.form.get(\"symbol\")\n stock = lookup(request.form.get(\"symbol\"))\n\n # error checking\n if not stock:\n return apology(\"Missing or Incorrect Symbol\", 400)\n\n # check if stock is owned\n try:\n sold_stock = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id AND symbol = :symbol GROUP BY symbol\", user_id=session[\"user_id\"], symbol=symbol)[0]\n except IndexError:\n return apology(\"Stock not owned\", 400)\n\n # check for shares input\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Input at least 1 share\", 400)\n\n if shares < 0:\n return apology(\"Input at least 1 Share\", 400)\n\n if int(sold_stock[\"shares\"]) < shares:\n return apology(\"Not enough shares to sell\", 400)\n\n else:\n # define variables for inserting into transactions table and updating cash\n purchase_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # update user cash\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n user_cash = user_cash + (stock[\"price\"]*shares)\n db.execute(\"UPDATE users SET cash = :user_cash WHERE id = :user_id\", user_id=session[\"user_id\"], user_cash=user_cash)\n\n # update transactions table with selling transaction\n db.execute(\"\"\"\n INSERT INTO transactions(user_id, date, symbol, shares, price)\n VALUES(:user_id, :date, :symbol, :shares, :price)\n \"\"\",\n user_id=session[\"user_id\"],\n date=purchase_date,\n symbol=stock[\"symbol\"],\n shares=-shares,\n price=stock[\"price\"]\n )\n\n flash(\"You paper-handed that one!\")\n return redirect(\"/\")\n\n else:\n # query db for current holdings\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n stocks[:] = [stock for stock in stocks if stock.get('shares') > 0]\n return render_template(\"sell.html\", stocks=stocks)", "def sell():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure positive number of shares was submitted\n elif not request.form.get(\"shares\") or int(request.form.get(\"shares\")) < 0:\n return apology(\"must provide positive number of shares\", 403)\n\n elif int(request.form.get(\"shares\")) > (db.execute(\"SELECT sum(shares) as shares FROM 'transaction' WHERE u_id = :user_id and symbol = :symbol\", user_id = session[\"user_id\"], symbol = request.form.get(\"symbol\")))[0][\"shares\"]:\n return apology(\"cannot sell more shares than owned\", 403)\n\n else:\n returned_quote = lookup(request.form.get(\"symbol\"))\n row = db.execute(\"SELECT * FROM users WHERE id = :user_id\", user_id = session[\"user_id\"])\n\n db.execute(\"INSERT INTO 'transaction' 
('t_id','u_id','symbol','shares','price') VALUES (NULL,:u_id,:symbol,:shares,:price)\",\n u_id = session[\"user_id\"], symbol = returned_quote[\"symbol\"], shares = -1*int(request.form.get(\"shares\")), price = returned_quote[\"price\"])\n db.execute(\"UPDATE users SET cash = cash + :price * :shares WHERE id = :user_id\",\n price = returned_quote[\"price\"], shares = int(request.form.get(\"shares\")), user_id = session[\"user_id\"])\n\n flash(\"Sold\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n rows = db.execute(\"SELECT symbol, SUM(shares) as shares FROM 'transaction' WHERE u_id = :user_id GROUP BY symbol\", user_id = session[\"user_id\"])\n\n if len(rows) > 0:\n return render_template(\"sell.html\", rows = rows)\n else:\n return apology(\"no shares to sell\", 403)", "def buy():\n lookedup = []\n if request.method == \"POST\":\n if not request.form.get(\"buy_symbol\"):\n return apology(\"Must provide stock symbol\", 403)\n shares_to_buy = request.form.get(\"buy_amount\")\n if not shares_to_buy:\n return apology(\"Must provide number of shares to buy\", 403)\n\n shares_to_buy = int(shares_to_buy)\n\n if shares_to_buy <= 0:\n return apology(\"Must provide positive number of shares to buy\", 403)\n\n else:\n lookedup = lookup(request.form.get(\"buy_symbol\"))\n\n if not lookedup:\n return apology(\"Not a stock symbol\", 403)\n\n\n current_user = session[\"user_id\"]\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n\n # see if properly selecting cash amount\n if not user_cash:\n return apology(\"Didn't find user's current balance\", 000)\n\n\n current_cash = user_cash[0][\"cash\"]\n current_cash = int(current_cash)\n\n stock_name = lookedup.get(\"name\")\n stock_price = lookedup.get(\"price\")\n stock_symbol = lookedup.get(\"symbol\")\n\n total_cost = shares_to_buy * stock_price\n if current_cash < total_cost:\n return apology(\"You do not have enough money for this purchase\", 000)\n\n new_balance = current_cash - total_cost\n\n db.execute(\"UPDATE users SET cash = :new_balance WHERE id = :id\", new_balance=new_balance, id=current_user)\n\n db.execute(\"INSERT INTO purchases (id,stock_symbol,volume_purchased,price,date_purchased) VALUES(:id,:symbol,:amount,:price,datetime('now'))\", id=current_user, symbol=stock_symbol, amount=shares_to_buy, price=stock_price)\n\n check_holdings = db.execute(\"SELECT volume FROM portfolio WHERE id = :id AND stock_symbol=:stock_symbol\", id=current_user, stock_symbol=stock_symbol)\n\n if not check_holdings:\n db.execute(\"INSERT INTO portfolio (id,stock_symbol,volume) VALUES(:id,:stock_symbol,:volume)\", id=current_user, stock_symbol=stock_symbol, volume=shares_to_buy)\n else:\n old_volume = check_holdings[0][\"volume\"]\n old_volume = int(old_volume)\n new_volume = old_volume+shares_to_buy\n db.execute(\"UPDATE portfolio SET volume = :new_volume\", new_volume=new_volume)\n\n\n\n return render_template(\"bought.html\", stock_name=stock_name,stock_price=stock_price, stock_symbol=stock_symbol, shares_to_buy=shares_to_buy, total_cost= total_cost)\n\n\n\n else:\n return render_template(\"buy.html\")\n\n\n return apology(\"TODO BUY\")" ]
[ "0.75090504", "0.73669976", "0.719782", "0.7076133", "0.702035", "0.70142156", "0.69631433", "0.6943623", "0.6915625", "0.691014", "0.6897359", "0.68731195", "0.6865242", "0.68647367", "0.6850216", "0.6848369", "0.6818854", "0.67891747", "0.6722476", "0.6719868", "0.67188966", "0.67112154", "0.6705635", "0.6700653", "0.66989964", "0.6695444", "0.6687238", "0.6684804", "0.66811556", "0.6655224", "0.6651959", "0.6649536", "0.6624344", "0.66173476", "0.6613126", "0.6609052", "0.6603438", "0.6595926", "0.6585596", "0.6578174", "0.65766925", "0.6573936", "0.6562317", "0.65583086", "0.65577334", "0.6526294", "0.65238345", "0.6519029", "0.6516538", "0.65035534", "0.64914435", "0.64667463", "0.6459902", "0.6451228", "0.64506495", "0.64483476", "0.6447599", "0.64350283", "0.6431609", "0.6421019", "0.6420768", "0.6420553", "0.6419165", "0.6416702", "0.64161277", "0.64114636", "0.6407591", "0.6406082", "0.63956153", "0.6387202", "0.63830394", "0.638049", "0.635205", "0.6341835", "0.6338913", "0.63325727", "0.63291985", "0.6327545", "0.632726", "0.632575", "0.6305297", "0.6302711", "0.6300179", "0.6298004", "0.62972134", "0.62947214", "0.628081", "0.62716454", "0.62636733", "0.62556595", "0.6254726", "0.625456", "0.6253792", "0.62462944", "0.6244217", "0.6216623", "0.62081665", "0.62070346", "0.62038547", "0.6201706" ]
0.6677518
29
Show history of transactions
def history():
    user_id = session.get('user_id')
    table_name = f'stocks_user{user_id}'
    rows = db.execute("SELECT * FROM ?", table_name)
    return render_template('history.html', rows=rows)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Transacted FROM cash WHERE id=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", transactions=transactions)", "def history():\n\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n\n #Convert Price to US Dollars and format transaction time\n for t in trans:\n t.price = usd(t.price)\n t.transacted = t.transacted.strftime('%Y-%m-%d %H:%M:%S')\n\n #Return history.html\n return render_template('history.html', trans=trans)", "def history():\n transactions_list = db.execute(\"SELECT stock, units, price, time, type FROM transactions WHERE id = :current_id\",\n current_id=session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions_list)", "def history():\n\n # get all transactions for current user\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n\n # render history.html with all user transactions\n return render_template(\"history.html\", transactions=transactions, usd=usd)", "def history():\n\n rows = db.execute(\"SELECT * FROM 'transaction' WHERE u_id = :user_id\", user_id = session[\"user_id\"])\n return render_template(\"history.html\", rows = rows)", "def history():\n\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = ? ORDER BY date DESC, time DESC\", session[\"user_id\"])\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Obtain history information for logged in user\n TRANSACTIONS = db.execute(\"SELECT * FROM history WHERE user_id = ? ORDER BY transacted DESC\", user_id)\n\n return render_template(\"history.html\", transactions=TRANSACTIONS)", "def history():\n\n transactions = db.execute(\"SELECT stock, amount, price, date, time, total_amount FROM transactions WHERE id=:id\", id=session['user_id'])\n\n\n return render_template(\"index.html\", transactions=transactions)", "def history():\n\n rows = db.execute('SELECT operation, symbol, shares, price, date FROM transactions WHERE id = :id',\n id=session['user_id'])\n\n return render_template('history.html', stocks=rows[::-1])", "def history():\n\n user = session[\"user_id\"]\n rows = db.execute(\"SELECT * FROM transactions WHERE user_id = :user\", user=user)\n\n # transactions list\n transactions = []\n for row in rows:\n stock_data = lookup(row['symbol'])\n transactions.append(list((\n stock_data['symbol'],\n stock_data['name'],\n row['amount'],\n row['value'],\n row['date'],\n )))\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n transactions = db.execute(\"SELECT Symbol, Shares, Price, Date FROM history WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n return render_template(\"history.html\", transactionList = transactions, currentUser=session.get(\"user_id\"))", "def history():\n\n userId = session[\"user_id\"]\n\n shares = db.execute(f\"SELECT symbol, shares, price, trans_time FROM transactions WHERE user_id={userId} ORDER BY trans_id DESC\")\n\n return render_template(\"history.html\", shares=shares)", "def history():\n rows = db.execute(text(\n \"SELECT symbol, shares, price, time FROM transactions \"\n \"WHERE user_id=:id\"),\n id=session[\"user_id\"])\n transactions = []\n for row in rows:\n transaction = dict(row)\n transaction[\"price\"] = usd(transaction[\"price\"])\n transactions.append(transaction)\n return 
render_template(\"history.html\", transactions=transactions)", "def history():\n # query database for history\n transactions = db.execute(\"SELECT symbol, volume, share_price, dtstamp FROM `transaction` WHERE id = :id\", id = session[\"user_id\"])\n\n # initialise dict\n dic = {}\n\n # interate through history array\n\n # pass data to template\n return render_template(\"history.html\", transactions = transactions)", "def history():\n # name variable to show current users name in template\n name = db.execute(\"SELECT username FROM users WHERE id=:id\", id=session[\"user_id\"])\n\n # user's transaction history\n hist = db.execute(\"SELECT transactid, name, price, quantity, date FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n\n # return the template with the relevant objects for jinja\n return render_template(\"history.html\", name=name, hist=hist)\n\n # if function fails\n return apology(\"Can't display history\", 400)", "def history():\n get_trans_codes = db.execute(\"SELECT transaction_code FROM History WHERE id = :id\", id=session['user_id'])\n get_symbols = db.execute(\"SELECT symbol FROM History WHERE id = :id\", id=session['user_id'])\n get_companies = db.execute(\"SELECT company FROM History WHERE id = :id\", id=session['user_id'])\n get_trans_types = db.execute(\"SELECT transaction_type FROM History WHERE id = :id\", id=session['user_id'])\n get_shares = db.execute(\"SELECT shares FROM History WHERE id = :id\", id=session['user_id'])\n get_prices = db.execute(\"SELECT price FROM History WHERE id = :id\", id=session['user_id'])\n get_timestamps = db.execute(\"SELECT timestamp FROM History WHERE id = :id\", id=session['user_id'])\n\n trans_codes = [code['transaction_code'] for code in get_trans_codes]\n symbols = [symbol['symbol'] for symbol in get_symbols]\n companies = [company['company'] for company in get_companies]\n trans_types = [types['transaction_type'] for types in get_trans_types]\n shares = [share['shares'] for share in get_shares]\n prices = [price['price'] for price in get_prices]\n timestamps = [timestamp['timestamp'] for timestamp in get_timestamps]\n\n return render_template(\"history.html\", values=zip(trans_codes, symbols, companies, trans_types, shares, prices, timestamps))", "def history():\n query = Records.query.filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"history.html\", rows=query)", "def history():\n\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user ORDER BY date\",\n user=user)\n\n return render_template(\"history.html\", stocks = owned)", "def history():\n # Select stock info for every single stock transaction for the respective user\n rows = db.execute(\"SELECT symbol, shares, price, transacted FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Return template with the list that has each stock transaction info\n return render_template(\"history.html\", rows=rows)", "def history():\n transactions = db.execute(\"SELECT * FROM history WHERE user_id = ?\", session[\"user_id\"])\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n \n return render_template(\"history.html\", transactions=transactions, user_name=user_name[0][\"username\"])", "def history():\n # extract history of operation for a particular user\n historical_data = db.execute(\"SELECT Symbol, Company, Shares, Price, Total, Timestamp FROM 
portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", historical=historical_data)", "def history():\n userID = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM transactions WHERE id=:userID\", userID=userID)\n\n for row in transactions:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"total\"] = usd(row[\"num_shares\"] * row[\"price_ps\"])\n\n return render_template(\"history.html\", transactions=transactions)", "def history():\n histories = db.execute(\"SELECT * from purchases WHERE user_id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", histories=histories)", "def history():", "def do_gethistory(self,args):\n #Very rough. pretty print it\n history=bitstamp.get_usertransactions()\n ppdict(history)", "def history():\n userid = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM purchase WHERE userid = :userid\", userid = userid)\n for transaction in transactions:\n transaction[\"price\"] = usd(transaction[\"tot\"]/transaction[\"shares\"])\n transaction[\"name\"] = lookup(transaction[\"symbol\"])['name']\n return render_template(\"history.html\", transactions=transactions)", "def history():\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)", "def history():\n username = session.get(\"username\")\n history=db.execute(\"SELECT stock_symbol, unit_price, time, quantity, stock_name, status FROM history WHERE username=:username\",\n username=username)\n return render_template(\"history.html\", history=history)", "def transaction_history(user_id):\n # Run the transaction in the background\n executor.submit(transaction_run)\n user_id = login_session['user_id']\n # Get all transaction made by all the users\n user_tran = Transaction.query.filter_by(done=True).filter_by(user_id=user_id).all()\n target_tran = Transaction.query.filter_by(done=True).filter_by(target_user=user_id).all()\n user_curr = Currency.query.filter_by(user_id=user_id).first()\n\n return render_template('trans_history.html',\n transactions=user_tran + target_tran,\n currency=user_curr)", "def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)", "def history():\n user_id = session[\"user_id\"]\n\n history_list = db.execute(\"SELECT symbol, price, amount, timestamp FROM stocks WHERE user_id = :user_id\", user_id = user_id)\n\n rows = len(history_list)\n\n history = []\n\n for row in range(rows-1, -1, -1):\n history.append([history_list[row][\"symbol\"], history_list[row][\"amount\"], history_list[row][\"price\"], history_list[row][\"timestamp\"]])\n\n return render_template(\"history.html\", history = history, rows = rows)", "def history():\n\n symbols = []\n shares = []\n prices = []\n times = []\n\n purchases = db.execute(\"SELECT * FROM purchase WHERE id = :username\", username=session[\"user_id\"])\n length = len(purchases)\n\n for item in purchases:\n symbols.append(item[\"symbol\"])\n shares.append(item[\"shares\"])\n prices.append(item[\"price\"])\n times.append(item[\"created_at\"])\n\n return render_template(\"history.html\", symbols = symbols, shares = shares, prices = prices, times = times, length = length)", "def history():\n rows = db.execute(\"SELECT * FROM histories WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", rows=rows)", "def history():\n rows = db.execute(\"SELECT stock_id, stocks.symbol, price, 
shares, date FROM history JOIN stocks ON history.stock_id=stocks.id WHERE user_id=:user_id\", user_id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n\n return render_template(\"history.html\", history = history)", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def history():\n history = db.execute(\"SELECT * from history WHERE id=:id\", id=session[\"user_id\"])\n \n return render_template(\"history.html\", history = history)", "def history():\n \n value_dicts = db.execute(\"SELECT * FROM history WHERE user_id = :usid\", usid=session[\"user_id\"])\n return render_template(\"history.html\", value_dicts=value_dicts)", "def show_history_log(self):\n self.visual.print_enum(self.visual.history_log)", "def view_order_history(request):\n\n\ttemplate_name = 'order_history.html'\n\tall_orders = Order.objects.filter(user=request.user, payment__isnull=False)\n\tcontext = {'orders': all_orders}\n\treturn render(request, template_name, context)", "def history():\n\n data = db.execute(\"select * from history\")\n return render_template(\"history.html\", data=data)", "def history():\n \n #select user's portfolio\n rows = db.execute(\"SELECT stock, number, trans_price, transaction_stamp FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n return render_template(\"history.html\", rows=rows)", "def history():\n \n # selection of name, symbol, shares and cash of user stocks\n hist = db.execute(\"SELECT * FROM history WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"history.html\", hist=hist)", "def history():\n \"\"\"Show portfolio of stocks\"\"\"\n all_rows = []\n rows = db.execute(\"SELECT * FROM history WHERE id = :id\",\n id=session['user_id'])\n if rows==None or len(rows) < 1:\n return render_template(\"history.html\", all_rows=all_rows)\n else:\n for row in rows:\n share_row = []\n share_row.append(row[\"symbol\"])\n share_row.append(row[\"shares\"])\n share_row.append(usd(row[\"price\"]))\n share_row.append(row[\"transacted\"])\n all_rows.append(share_row)\n return render_template(\"history.html\", all_rows=all_rows)", "def history():\n current_userid = session[\"user_id\"]\n userbalance = get_userbal(db, current_userid)\n userstocks = get_userstock(db, current_userid)\n stockhistory = get_history(db, current_userid)\n stocklist = get_stocklist(db, stocksid=True, prices=True)\n if request.method == \"GET\":\n return render_template(\"history.html\", userbalance=usd(userbalance),\n userstocks=userstocks, buystocks=stocklist,\n stockhistory=stockhistory)\n else:\n return apology(\"TODO\")", "def get_transaction_history(self, txn_id_or_ref):\n response = self.get(f\"{self.gateway_path}/timeline/{txn_id_or_ref}\")\n return response", "def history():\n\n user = session.get(\"user_id\")\n rows = db.execute(\"Select TransDate as Date, Stock, Price, case when Num < 0 then 'Sell' else 'Buy' end as Type, Num as Quantity from portfolio where User = :User order by Date asc\", User = session.get(\"user_id\"))\n\n\n return 
render_template(\"hist.html\", rows = rows)", "def history():\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n symbols = db.execute(\"SELECT symbol FROM history WHERE username=:username\", username=username)\n buyorsells = []\n for item in db.execute(\"SELECT buyorsell FROM history WHERE username=:username\", username=username):\n if item[\"buyorsell\"]:\n buyorsells.append(\"Bought\")\n else:\n buyorsells.append(\"Sold\")\n numbers = db.execute(\"SELECT number FROM history WHERE username=:username\", username=username)\n prices = db.execute(\"SELECT price FROM history WHERE username=:username\", username=username)\n dates = db.execute(\"SELECT date FROM history WHERE username=:username\", username=username)\n return render_template(\"history.html\", username=username, symbols=symbols, buyorsells=buyorsells, numbers=numbers,\n prices=prices, dates=dates)", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def history():\n user_history=db.execute(\"SELECT * FROM history WHERE user_id=:u_i\",u_i=session[\"user_id\"])\n return render_template(\"history.html\",s=user_history)", "def add_history(self):\n # add separator, if there already are history entries\n if self.parentApp.History != '':\n self.parentApp.History += (\n '\\n\\n--- --- --- --- --- --- --- --- --- --- --- ---\\n\\n'\n )\n\n # add the transaction to it\n self.parentApp.History += self.parentApp.tmpTransC.to_str()", "def show_history(self, ts=0, count=0, fmt='raw'):\n print \"Querying the station for historical records...\"\n for i, r in enumerate(self.station.genArchiveRecords(ts)):\n if fmt.lower() == 'raw':\n self.print_raw(r['datetime'], r['ptr'], r['raw_data'])\n elif fmt.lower() == 'table':\n self.print_table(r['datetime'], r['data'], i == 0)\n else:\n print r['datetime'], r['data']\n if count and i > count:\n break", "def history():\n # User reached route via GET (as by clicking a link or via redirect)\n if request.method == \"GET\":\n # Select to buy-sell table\n bs = db.execute(\"SELECT * FROM bs WHERE userID=:userID\", userID=session[\"user_id\"])\n\n # len of buy sell table\n bslen = len(bs)\n\n # Falsh massage\n flash('history')\n\n # Rander buy sell and total return value list\n return render_template(\"history.html\", bs=bs, bslen=bslen)", "def history_testnet(btc_address):\n history = []\n response = json.loads(make_request('http://tbtc.blockr.io/api/v1/address/txs/' + btc_address))\n if response.get('status') == 'success':\n data = response.get('data')\n txs = data.get('txs')\n\n for tx in reversed(txs):\n history.append(get_tx_info(tx.get('tx')))\n\n return history", "def history():\n \n # only prints shifts from current user\n usernum = db.execute(\"SELECT * FROM users WHERE id=:id\", id = session[\"user_id\"])[0][\"id\"]\n \n # stores shift data into hours\n hours = db.execute(\"SELECT * FROM history WHERE User=:id\", id = usernum)\n \n # calculates total amount of cash ever paid to user\n cash = db.execute(\"SELECT sum(total) FROM history WHERE User=:id\", id = session[\"user_id\"])[0][\"sum(total)\"]\n \n return render_template(\"history.html\", hours = hours, Total = cash)", "def history():\n #Get users history (no amount = 0)\n #Prepare table\n\n rows = db.execute(\"SELECT * from history WHERE user_id = :userid AND amount != 0\", 
userid = session[\"user_id\"])\n for row in rows:\n row['price'] = usd(row['price'])\n\n return render_template(\"history.html\", history=rows)", "def history(request):\n\treturn render(request,'history.html',None)", "def history(self):\n alembic.command.history(self.alembic_config(), verbose=True)", "def history():\n\n # obtain stock info from portfolio database\n history = db.execute(\"SELECT symbol, shares, price, date FROM history WHERE id = :id ORDER BY date DESC\", id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for transaction in history:\n symbol = transaction[\"symbol\"]\n shares = transaction[\"shares\"]\n price = transaction[\"price\"]\n date = transaction[\"date\"]\n\n return render_template(\"history.html\", history = history)", "def history():\n hist = db.execute(\"SELECT * FROM shares WHERE userid = :uid ORDER BY date DESC\", uid=session[\"user_id\"])\n for h in hist:\n h[\"total\"] = round(h[\"value\"]*h[\"quantity\"],2)\n return render_template(\"history.html\", context=hist)", "def show_history(user_id):\n return History.where('user_id', user_id).get()", "def get_deposit_history(self, currency=None):\n if not currency:\n currency = \"\"\n return self.__call__('balance', \"getdeposithistory\", \n {\"currencyname\": currency})", "def history(self, request, *args, **kwargs):\n account = self.get_object()\n\n try:\n history = HistoricoConta.objects.filter(conta=account).order_by('-created')\n except ObjectDoesNotExist as obj:\n return Response({\"detail\": \"Could not find history for thus account\",\n \"status_code\": status.HTTP_404_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n return Response(HistoricoContaSerializer(history, many=True).data)", "def list_history(request):\n history = History.objects\n\n if not is_admin(request.user):\n history = history.filter(submitter=request.user)\n history = history.order_by('-submission_date')\n\n return render('editor/list_history.mako', request, {\n 'history': history,\n })", "def orders_history(self): \n return(self._d_orders['history'])", "def history(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/history.html',\r\n context_instance=RequestContext(request,\r\n {\r\n 'title': 'Work History',\r\n 'contact': Contact.objects.get(pk=1),\r\n 'work_histories': WorkHistory.objects.all().order_by('-start_date'),\r\n 'current_application': Application.objects.get(pk=1),\r\n\r\n })\r\n )", "def show_history(self, ts=0, count=0, fmt='raw'):\n records = self.station.get_records(since_ts=ts, num_rec=count)\n for i,r in enumerate(records):\n if fmt.lower() == 'raw':\n raw_dump(r['datetime'], r['ptr'], r['raw_data'])\n elif fmt.lower() == 'table':\n table_dump(r['datetime'], r['data'], i==0)\n else:\n print(r['datetime'], r['data'])", "def get_history(self, taxlot_view):\n history = []\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = 
TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=taxlot_view.state_id\n ).order_by('-id').first()\n master = {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n while not done_searching:\n if (log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n done_searching = True\n elif log.name == 'Merge current state in migration':\n record = record_dict(log.parent1)\n history.append(record)\n if log.parent1.name == 'Import Creation':\n done_searching = True\n else:\n tree = log.parent1\n log = tree\n else:\n tree = None\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and \\\n log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, master", "def history():\n db.execute(\"CREATE TABLE IF NOT EXISTS hist(user_id INTEGER NOT NULL, typ TEXT NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, nos INTEGER NOT NULL, cost NUMERIC NOT NULL, time DATETIME NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n stocks = db.execute(\"SELECT * FROM hist WHERE user_id = ?\", session[\"user_id\"])\n return render_template(\"history.html\", stocks = stocks)", "def history():\n \n u_row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = u_row[0]['username']\n \n result = db.execute(\"SELECT * FROM history WHERE username=:username\", username=username)\n \n if result:\n dict = {}\n dict['symbol'] = []\n dict['shares'] = []\n dict['price'] = []\n dict['time'] = []\n \n for row in result:\n symbol = row['symbol']\n shares = row['shares']\n time = row['time']\n \n quote = lookup(symbol)\n name = quote['name']\n price = quote['price']\n total = shares * price\n \n dict['symbol'].append(symbol)\n dict['shares'].append(shares)\n dict['price'].append(usd(price))\n dict['time'].append(time)\n \n length = len(dict['symbol'])\n \n return render_template(\"history.html\",length=length,dict=dict)\n \n else:\n return render_template(\"history.html\",length=0,dict=[])", "def GetHistory(index=0):\n if index == \"clear\":\n state_mgr.entire_history = []\n else:\n print state_mgr.entire_history[int(index):]", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def view_transactions(self) -> None:\n user_choice = Menu.prompt_view_transactions()\n if user_choice == 5:\n print(\"Returning to main menu...\")\n return\n\n budget_category = 
BudgetManager.category_mapping[user_choice]\n print(f\"\\nTransactions in the {budget_category.value} \"\n f\"category: \")\n for tx in self.user.tx_manager:\n if tx.budget_category == user_choice:\n print(f\"\\n{tx}\")", "def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"", "def history():\n userid = session[\"user_id\"]\n history = db.execute(\"SELECT * FROM history WHERE id=:uid\", uid=userid)\n dic = {}\n data = []\n for row in history:\n # print(row)\n dic[\"symbol\"] = row[\"symbol\"]\n dic[\"shares\"] = row[\"shares\"]\n dic[\"price\"] = usd(row[\"price\"])\n dic[\"time\"] = row[\"time\"]\n data.append(dic.copy())\n # print(data)\n return render_template(\"history.html\", data=data)", "def history():\n\n if request.method == 'POST':\n user_input_uuid = request.form['uuid']\n\n dm = DatabaseManager()\n genes, diseases, uuid, query, genpanel, date =\\\n dm.retreieve_zoekopdracht(user_input_uuid)\n\n make_session(\"uuid\", uuid, 2)\n\n return redirect(url_for('vis_results'))\n\n hislis = []\n\n if session.get('history'):\n hislis = reversed(session['history'])\n\n return render_template(\"history.html\", hislis=hislis)", "def QueryHistory(self):\n return []", "def history(self):\n return self.info['history']", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def get_withdrawal_history(self, currency=None):\n if not currency:\n currency = \"\"\n return self.__call__('balance', \"getwithdrawalhistory\", \n {\"currencyname\": currency})", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def get_history(self):\n return self.history", "def account_history(self, account=None, type='all', range=\"all\"):\n \n if not (utils.check(type) and utils.check(range)):\n return {}\n \n # Imply account\n if account == None:\n account = self.params['account']\n \n # Assemble URL\n url = self.endpoints['base'] +\\\n 'accounts/' +\\\n str(account) +\\\n '/history.json'\n # Add parameters\n data = {\n 'range':range,\n 'transactions':type\n }\n \n # Create HTTP Request objects\n session = requests.Session()\n auth = self.create_auth()\n req = requests.Request('GET',url,params=data,auth=auth).prepare()\n \n \n results = {'response':session.send(req).json()}\n results['request'] = utils.pretty_print_POST(req)\n \n return results['response']['response']['transactions']['transaction']", "def history():\n files = os.listdir(app.config['SEGMENTS_FOLDER'])\n if len(files) <= 3:\n flash('There is no history yet', 'warning')\n return redirect(url_for('home'))\n\n range_list, segments_list, full_track_dict_list = generate_track_and_segments_data(app, files)\n\n return render_template(\"history.html\", segments_list=segments_list,\n full_track_dict_list=full_track_dict_list,\n range_list=range_list,\n title=\"history\")", "def view_transactions(request, id):\n account = get_object_or_404(Account, pk=id, user=request.user)\n return render(request, 'ledger/pages/view_transactions.html', {\n 'title': \"View Transactions\",\n 'breadcrumbs': [account],\n 'account': account,\n })", "def history():\n\n #Get 
the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT * FROM history WHERE id = :id\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n return render_template(\"history.html\", ports=ports)", "def get_deposit_history(self, asset: Asset, start_time: Optional[int] = None,\n end_time: Optional[int] = None, receive_window: Optional[int] = None):\n api_params = {\n \"asset\": asset.value,\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if start_time is not None:\n api_params['startTime'] = start_time\n\n if end_time is not None:\n api_params['endTime'] = end_time\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/deposit/history', params=api_params)", "def generate_history(self):\n self.reporter.generate()", "async def history(message, client, extra_args):\n\n if not extra_args or not (user_id := utils.from_mention(extra_args[0])):\n user_id = message.author.id\n\n @database.query\n def get_transactions(conn):\n cursor = conn.cursor()\n cursor.execute(\n \"SELECT * FROM funnypts WHERE awarder = ? OR awardee = ? ORDER BY date DESC\", (user_id, user_id))\n transactions = cursor.fetchall()\n cursor.close()\n conn.close()\n return transactions\n\n if not (transactions := get_transactions()):\n await message.channel.send(\"THIS USER HAS NO HISTORY, THEY SHOULD THOUGH\")\n return\n\n @utils.paginated_embeds\n def populate(embed, entry, entry_number):\n awarder = client.get_user(entry[0]).name.split(\"#\", 1)[0]\n awardee = client.get_user(entry[1]).name.split(\"#\", 1)[0]\n transaction = \"GIVEN TO\" if entry[3] > 0 else \"TAKEN FROM\"\n date = entry[4].split(\" \", 1)[0]\n reason = \"\\\"{0}\\\"\".format(entry[2])\n\n embed.add_field(\n name=\"{0} — {2} — {1} • {3}\".format(awarder, awardee, transaction, date), value=reason, inline=False)\n\n title = f\"{client.get_user(user_id).name}'s FUNNYPOINT HISTORY\"\n embeds = populate(title, transactions, page_length=5)\n await utils.sauce_pages(embeds, message, client)", "def history(ticker):\n head, body = gethistory(ticker)\n html_str = htmltable(head, body)\n return html_str", "def see_all_transfers(request):\n transfers = Transaction.objects.all().order_by('-executed_time')\n return render(request, 'app/allTransfers.html', {'transfers': transfers})", "def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')", "def history():\n backup_history()\n yield\n reset_history()", "def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, 
context_instance=RequestContext(request))", "def history_list(name):\n service_histories = request_service_history(name)\n table = present(lambda: service_histories,\n renderer='table',\n headers=['History Version', 'Service Name', 'Date Created', 'Manifest'],\n columns=['id', 'name', 'created_at', 'manifest'])\n if table:\n click.echo(table)\n else:\n click.echo('There is no record of your service deployments available.')\n # click.echo('https://docs.fandogh.cloud/docs/services.html\\n')", "def history():\n if request.method == \"GET\":\n \n user_id = int(session.get('user_id'))\n user_data = db.execute('''SELECT * FROM history WHERE user_id = :user_id''', user_id = user_id)\n \n if not user_data:\n return render_template('quote.html')\n \n #create lists of values for sake of returning them to F2E\n portfolio = []\n \n for i in user_data:\n #getting data from table\n date = i.get('date')\n symbol = i.get('symbol')\n name = i.get('stock_name')\n quantity = i.get('quantity')\n price = round(float(i.get('price')), 2)\n action = str(i.get('deal'))\n \n #inserting data into a list\n a_dict = {\n 'date': date, 'symbol': symbol, \n 'name': name, 'price': price, \n 'quantity': quantity, 'action': action\n }\n portfolio.append(a_dict)\n \n return render_template('history.html',\n portfolio=portfolio)\n else:\n return render_template('index.html')", "def account_df_history(self, improve=False):\n return(self.account_df('history', improve))", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass" ]
[ "0.82002884", "0.8140903", "0.8114919", "0.8042388", "0.8033679", "0.80168986", "0.79338187", "0.7822898", "0.77456313", "0.77341163", "0.7718728", "0.7716838", "0.7708978", "0.7628142", "0.7624304", "0.7530955", "0.74965966", "0.7481556", "0.74478734", "0.7423578", "0.7382277", "0.7288208", "0.72825384", "0.72540516", "0.7240194", "0.7233303", "0.7193252", "0.7184091", "0.7178525", "0.71768135", "0.7147903", "0.7144208", "0.7143306", "0.71186686", "0.7105528", "0.7099931", "0.7093764", "0.7089086", "0.70393765", "0.70249367", "0.7018011", "0.701332", "0.7010136", "0.6967728", "0.69604427", "0.69457144", "0.6897571", "0.68591934", "0.681676", "0.68055445", "0.67798513", "0.6772446", "0.675553", "0.6723649", "0.6720259", "0.66986763", "0.6693779", "0.6682868", "0.66739595", "0.6656746", "0.6606525", "0.66026926", "0.6577423", "0.65742314", "0.65712106", "0.65543926", "0.65473205", "0.6535811", "0.65318495", "0.65282375", "0.65255904", "0.6510534", "0.64600503", "0.6457786", "0.64558256", "0.64352304", "0.6426731", "0.6421675", "0.6371526", "0.6371526", "0.6371526", "0.6316858", "0.62508285", "0.6249339", "0.62471", "0.62274307", "0.622531", "0.62213767", "0.62128365", "0.619218", "0.61842453", "0.6180837", "0.61752397", "0.6164259", "0.6141393", "0.6132607", "0.6131243", "0.61254805", "0.61166203", "0.61134505" ]
0.7030488
39
Sell shares of stock
def sell():
    if request.method == "GET":
        symbols = []
        table_name = f"stocks_user{session.get('user_id')}"
        rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol="DINHEIRO" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)
        for row in rows:
            symbols.append(row["stock_symbol"])
        return render_template('sell.html', symbols=symbols)
    elif request.method == "POST":
        symbols = []
        table_name = f"stocks_user{session.get('user_id')}"
        rows = db.execute('SELECT DISTINCT stock_symbol FROM ? WHERE NOT stock_symbol="DINHEIRO" GROUP BY stock_symbol HAVING SUM(shares) >= 1', table_name)
        for row in rows:
            symbols.append(row["stock_symbol"])
        if request.form.get("symbol") not in symbols:
            return apology("Código de ação inválido")
        shares = db.execute("SELECT SUM(shares) FROM ? WHERE stock_symbol = ?", table_name, request.form.get("symbol"))[0]["SUM(shares)"]
        if not request.form.get("shares"):
            return apology("Digite a quantidade de ações")
        elif int(request.form.get("shares")) > shares:
            return apology("Você não tem tantas ações")
        elif int(request.form.get("shares")) <= 0:
            return apology("Quantidade de ações não positiva")
        else:
            current_price = lookup(request.form.get("symbol"))['price']
            money_received = current_price * int(request.form.get("shares"))
            db.execute("INSERT INTO ? (stock_symbol, shares, price, time) VALUES(?, ?, ?, ?)", table_name, request.form.get("symbol"), -(int(request.form.get("shares"))), current_price, time_date())
            db.execute("UPDATE users SET dinheiro = dinheiro + ? WHERE id = ?", money_received, session.get("user_id"))
            return redirect('/')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sell_stock(self, symbol):\n amount_to_sell = self.get_equity(symbol)\n chirp.order_sell_fractional_by_price(symbol, amount_to_sell)\n self.L.add_line('', symbol, 'SOLD', amount_to_sell)", "async def sell(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot sell less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name not in user_stocks:\n\t\t\t\tawait ctx.send(f'You do not have any shares of {name}.')\n\t\t\t\treturn\n\t\t\tif shares > user_stocks[name]['count']:\n\t\t\t\tawait ctx.send(\n\t\t\t\t\tf'You do not have enough shares of {name}. '\n\t\t\t\t\tf'You only have {user_stocks[name]} share{plural}.'\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\tuser_stocks[name]['count'] -= shares\n\t\t\tif user_stocks[name]['count'] == 0:\n\t\t\t\tdel user_stocks[name]\n\t\tbal = await bank.deposit_credits(ctx.author, shares * price)\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(\n\t\t\tf'You sold {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def sell_stock (self, ticker, sell_date):\n \n self.__validate_sell__() \n self.__get_sell_share_price__(ticker, sell_date)\n self.__calc_profit_from_sales__() \n self.__update_sell_delta_amount__()\n self.__save_sell__()\n\n del self.invested[ticker]", "def sell(self, stock, amount):\n self.orders[stock] -= amount", "def sell():\n \n user_id = session[\"user_id\"]\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n \n if request.method == \"POST\":\n \n # get required symbol\n symbol = request.form.get(\"symbol\").upper()\n try:\n qty = int(request.form.get(\"qty\"))\n except ValueError:\n return apology(\"QTY is empty!\", 403)\n \n # proceed buy function\n sell_result: Tuple[float, str] = sell_shares(db, user_id, symbol, qty )\n if sell_result[0] == -1:\n return apology(sell_result[1], 403)\n\n return redirect(\"/\")", "def marketSell(self, currency_pair, amount):\n # calcular o rate num 'for'\n bids = rOrderBook(currency_pair=currency_pair, field='bids')\n list_resp = []\n for bid in bids:\n if bid[1] < amount:\n sold = self.limitSell(currency_pair, rate=bid[0], amount=bid[1], ioc=True)\n list_resp.append(sold)\n amount -= bid[0]\n elif bid[1] >= amount:\n sold = self.limitSell(currency_pair, rate=bid[0], amount=amount, ioc=True)\n list_resp.append(sold)\n amount -= amount\n break\n return list_resp", "def sell():\n if request.method == \"POST\":\n # Ensure data is inputted\n if not request.form.get(\"symbol\"):\n return apology(\"Insert symbol\", 403)\n \n if not request.form.get(\"shares\"):\n return apology(\"Insert number of shares to sell\", 403)\n \n # Ensure shares value is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough shares to sell \n share_count_dict = db.execute(\"SELECT share_count FROM shares WHERE user_id=:usid AND share=:share\", usid=session[\"user_id\"], 
share=request.form.get(\"symbol\").upper())\n share_count = int(share_count_dict[0][\"share_count\"])\n \n if int(request.form.get(\"shares\")) > share_count:\n return apology(\"You don't own enough shares\", 403)\n \n # Create variables\n symbol = request.form.get(\"symbol\").upper()\n quantity = int(request.form.get(\"shares\"))\n \n # Add cash to user data\n new_cash = float(lookup(symbol)[\"price\"]) * quantity\n db.execute(\"UPDATE users SET cash= cash + :cash WHERE id=:usid\", cash=new_cash, usid=session[\"user_id\"]) \n \n # Remove shares of user data\n db.execute(\"UPDATE shares SET share_count = share_count - :shares WHERE user_id=:usid AND share = :share\", shares=quantity,share=symbol, usid=session[\"user_id\"])\n db.execute(\"DELETE FROM shares WHERE user_id=:usid AND share_count = :shares\", usid=session[\"user_id\"], shares=0)\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol, shares='-' + str(quantity), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(lookup(symbol)[\"price\"]))\n \n return redirect(\"/\")\n \n else:\n # Create list with purchased symbols\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id=:usid\", usid=session[\"user_id\"])\n symbol_list = [None] * len(symbol_dicts)\n \n # Insert symbols into list\n for i in range(len(symbol_dicts)):\n symbol_list[i] = symbol_dicts[i][\"share\"]\n \n return render_template(\"sell.html\", longitude=len(symbol_dicts), symbols=symbol_list)", "def sell():\n if request.method == \"GET\":\n return render_template('sell.html')\n \n if request.method == \"POST\":\n symbol = request.form['symbol']\n shares = request.form['shares']\n stock = lookup(symbol)\n \n if not stock:\n return apology('Invalid symbol')\n \n user_shares = db.execute(\"SELECT shares FROM profile \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n if not user_shares or int(user_shares[0][\"shares\"]) < int(shares):\n return apology(\"Not enough shares\")\n db.execute(\"INSERT INTO history (company, shares, value, id, date) \\\n VALUES(:symbol, :shares, :price, :id, :date)\", \\\n symbol=stock[\"symbol\"], shares=-int(shares), \\\n price=stock[\"price\"], id=session[\"user_id\"], date = str(date.today())) \n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n \n shares_total = user_shares[0][\"shares\"] - int(shares)\n if shares_total == 0:\n db.execute(\"DELETE FROM profile \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n else:\n db.execute(\"UPDATE profile SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=shares_total, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n \n return redirect(url_for(\"index\"))", "def sell():\n\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n # ensure proper symbol\n stock = lookup(request.form.get(\"symbol\"))\n if not stock:\n return apology(\"Invalid Symbol\")\n\n # ensure proper number of shares\n try:\n shares = int(request.form.get(\"shares\"))\n if shares < 0:\n return apology(\"Amount of shares must be greater than 0\")\n except:\n return apology(\"Amount of shares must be greater than 0\")\n\n # select the symbol shares of that user\n user_shares = db.execute(\"SELECT 
shares FROM portfolio \\\n WHERE id = :id AND symbol=:symbol\", \\\n id=session[\"user_id\"], symbol=stock[\"symbol\"])\n\n # check if enough shares to sell\n if not user_shares or int(user_shares[0][\"shares\"]) < shares:\n return apology(\"You don't hold enough shares\")\n\n now = datetime.now()\n date_time = now.strftime(\"%Y-%m-%d %H:%M\")\n\n # update history of a sell\n db.execute(\"INSERT INTO history (symbol, shares, price, id, method, times, totaltarns) \\\n VALUES(:symbol, :shares, :price, :id, :method, :times, :totaltrans)\", \\\n symbol=stock[\"symbol\"], shares=-shares, \\\n price=usd(stock[\"price\"]), id=session[\"user_id\"], method= \"sell\", times= date_time, totaltrans = shares * stock[\"price\"])\n\n # update user cash (increase)\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", \\\n id=session[\"user_id\"], \\\n purchase=stock[\"price\"] * float(shares))\n\n # decrement the shares count\n amountshares = user_shares[0][\"shares\"] - shares\n\n # if after decrement is zero, delete shares from portfolio\n if amountshares == 0:\n db.execute(\"DELETE FROM portfolio \\\n WHERE id=:id AND symbol=:symbol\", \\\n id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n # otherwise, update portfolio shares count\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares \\\n WHERE id=:id AND symbol=:symbol\", \\\n shares=amountshares, id=session[\"user_id\"], \\\n symbol=stock[\"symbol\"])\n\n # return to index\n return redirect(url_for(\"index\"))", "def sell():\n if request.method == \"POST\":\n bef = db.execute(\"SELECT symbol FROM ind WHERE user_id = ?\", session[\"user_id\"])\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which valid stock to sell\", 403)\n symbol = request.form.get(\"symbol\")\n p = db.execute(\"SELECT COUNT(symbol) FROM ind WHERE user_id = ?\", session[\"user_id\"])\n q = 0\n\n for i in range(int(p[0][\"COUNT(symbol)\"])):\n if symbol == bef[i][\"symbol\"]:\n q = 1\n if q == 0:\n return apology(\"Please specify which valid stock to sell\", 403)\n if not request.form.get(\"shares\"):\n return apology(\"Please specify how many stocks you want to sell\", 403)\n if int(request.form.get(\"shares\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"shares\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) < int(request.form.get(\"shares\")):\n return apology(\"You do not own that many shares\", 403)\n shares = int(request.form.get(\"shares\"))\n db.execute(\"CREATE TABLE IF NOT EXISTS sells (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, shares INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"shares\"))\n money = bro[0][\"cash\"]\n money = money + cost\n db.execute(\"UPDATE users SET cash = ? 
WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"INSERT INTO sells(user_id, symbol, name, price, shares, cost, time) VALUES (:user_id, :symbol, :name, :price, :shares, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], shares = shares, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"SOLD\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = shares, cost = cost, time = datetime.datetime.now())\n\n db.execute(\"UPDATE ind SET nos = ? WHERE symbol = ? AND user_id = ?\", int(hav[0][\"nos\"]) - shares, request.form.get(\"symbol\"), session[\"user_id\"])\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) == 0:\n db.execute(\"DELETE FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n return redirect(\"/\")\n\n else:\n stocks = db.execute(\"SELECT * FROM ind WHERE user_id = ?\", session[\"user_id\"])\n\n return render_template(\"sell.html\", stocks = stocks)", "def sell(self, price, volume):\r\n self.order(\"ask\", price, volume)", "def sell():\n \n # if user reached route via POST, check all fields are filled\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\"):\n return apology(\"must provide symbol and number of shares\")\n \n # use lookup function to get stock info\n quote = lookup(request.form.get(\"symbol\"))\n \n # ensure validity of form\n if quote == None:\n return apology(\"invalid symbol\")\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must provide positive integer\")\n \n # initiate variables\n shares = int(request.form.get(\"shares\"))\n stocks = []\n \n # obtain user's stock information from portfolio database\n stocks = db.execute(\"SELECT shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n # check that user actually owns enough stock, or any stock at all\n if stocks == []:\n return apology(\"you don't own any of this stock\")\n if shares > stocks[0][\"shares\"]:\n return apology(\"invalid number of shares\")\n \n # calculate price per share and cost of all shares\n price = round(float(quote[\"price\"]),2)\n cost = round(float(shares * price),2)\n \n # update user's cash balance\n db.execute(\"UPDATE users SET cash = cash + :cost WHERE id = :id\", cost = cost, id=session[\"user_id\"])\n \n # if there are still shares leftover after sale, update row\n if shares < stocks[0][\"shares\"]:\n db.execute(\"UPDATE portfolio SET shares = shares - :shares WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], shares = shares, symbol = quote[\"symbol\"])\n \n # otherwise, if not shares leftover, remove row from portfolio entirely\n elif shares == stocks[0][\"shares\"]:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol = quote[\"symbol\"])\n \n db.execute(\"INSERT INTO history (id,symbol,shares,price,date) VALUES (:id,:symbol,:shares,:price,datetime('now'))\",id=session[\"user_id\"], symbol=quote[\"symbol\"],shares=-shares,price=price)\n \n flash('Sold!')\n return 
redirect(url_for(\"index\"))\n \n # else if user reached route via GET\n else:\n return render_template(\"sell.html\")", "def Sell(self, X, Y):\n if (self.share[X] - int(Y)) * (1 + self.taxe) < 0:\n raise TradeError(\"Not Enough Share\")\n self.share[X] -= int(Y)\n self.money += int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"SELL:{str(int(Y))}:{str(X)}\", flush = True)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"Symbol\")\n if symbol is None:\n return apology(\"Enter a symbol\", 403)\n shares = request.form.get(\"Shares\")\n if int(shares) < 0:\n return apology(\"Please enter postive shares\", 403)\n\n stock = lookup(symbol)\n rows = db.execute(\"SELECT Symbol, SUM(Shares) as totalShares FROM cash WHERE id=:id GROUP BY Symbol HAVING totalShares > 0\", id=session[\"user_id\"])\n for row in rows:\n if row[\"Symbol\"] == symbol:\n if int(shares) > row[\"totalShares\"]:\n return apology(\"Too many shares\")\n\n rows = db.execute(\"SELECT Cash FROM cash WHERE id=:id\", id=session[\"user_id\"])\n cash = rows[0][\"Cash\"]\n\n current_cash = cash + int(shares)*stock[\"price\"]\n db.execute(\"UPDATE cash SET Cash=:current_cash WHERE id=:id\", current_cash = current_cash, id=session[\"user_id\"])\n db.execute(\"INSERT INTO cash (id, Symbol, Name, Shares) VALUES (:id, :Symbol, :Name, :Shares)\", id=session[\"user_id\"], Symbol=stock[\"symbol\"], Name=stock[\"name\"], Shares=-1*int(shares))\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n rows = db.execute(\"SELECT Symbol FROM cash WHERE id=:id GROUP BY Symbol HAVING SUM(Shares) > 0\", id=session[\"user_id\"])\n # Shorthand for obtaining the symbol for every row in rows. So would output AAPL e.g.\n return render_template(\"sell.html\", symbols=[ row[\"Symbol\"] for row in rows ])", "def sell(self,\n currency_pair,\n rate,\n amount):\n pass", "def sell_all(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price -= np.abs(slip_factor)\n\n self.trade_manager.sell_all(from_symbol, to_symbol, price, amount, date)", "def sell():\n userid = session[\"user_id\"]\n stocks = db.execute(\"SELECT symbol FROM purchase WHERE userid = :userid GROUP BY symbol\",userid=userid)\n\n if request.method == \"POST\":\n symbol_sell = request.form.get(\"symbol\")\n shares_sell = float(request.form.get(\"shares\"))\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol HAVING symbol = :symbol\", userid=userid, symbol=symbol_sell)\n if shares_info[0][\"shares_sum\"] < shares_sell:\n return apology(\"You don't have that many shares\", 400)\n else:\n current = lookup(symbol_sell)\n price = current[\"price\"]\n amount = -shares_sell * price\n cash = db.execute(\"SELECT cash FROM users WHERE id =:userid\", userid=userid)\n balance = cash[0][\"cash\"] - amount\n db.execute(\"INSERT INTO purchase (userid, symbol, shares, tot) VALUES(:userid, :symbol, :shares, :tot)\",\n userid=userid, symbol=symbol_sell, shares=-shares_sell, tot=amount)\n db.execute(\"UPDATE users SET cash = :balance WHERE id = :userid\", balance=balance, userid=userid)\n flash(\"SOLD!!\")\n return redirect(\"/\")\n else:\n list_symbol = list()\n for symbol in stocks:\n shares_info = db.execute(\"SELECT SUM(shares) AS shares_sum FROM purchase\\\n WHERE userid = :userid GROUP BY symbol 
HAVING symbol = :symbol\", userid = userid, symbol=symbol[\"symbol\"])\n current_shares = shares_info[0]\n if shares_info[0][\"shares_sum\"]:\n list_symbol.append(symbol[\"symbol\"])\n return render_template(\"sell.html\", list_symbol=list_symbol)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = request.form.get('shares')\n\n if not symbol or not shares or symbol == \"Select Stock\":\n return apology(\"Please input a valid symbol and number of shares\")\n elif int(shares) <= 0:\n return apology(\"Please input a positive number for shares\")\n else:\n symbol = symbol.lower()\n shares = int(shares)\n get_cur_shares = db.execute(\n \"SELECT SUM(shares) FROM History WHERE id = :id AND symbol = :symbol GROUP BY symbol\", id=session['user_id'], symbol=symbol)\n try:\n cur_shares = [share['SUM(shares)'] for share in get_cur_shares][0]\n except IndexError:\n return apology(\"Please input a valid number of shares\")\n if shares > cur_shares:\n return apology(\"Sorry, you don't have enough shares to sell\")\n else:\n cur_price = float(lookup(symbol)['price'])\n sell_val = cur_price * float(shares)\n sell_val = float(sell_val)\n get_bal = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n balance = [bal['cash'] for bal in get_bal][0]\n balance = float(balance)\n new_balance = balance + sell_val\n company = lookup(symbol)['name']\n new_database_balance = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",\n cash=new_balance, id=session['user_id'])\n new_database_transaction = db.execute(\"INSERT INTO History ('symbol', 'company', 'shares', 'price', 'totalprice', 'id', 'transaction_type') VALUES (:symbol, :company, :shares, :price, :totalprice, :id, :transaction_type)\",\n symbol=symbol, company=company, shares=-shares, price=cur_price,\n totalprice=sell_val, id=session['user_id'], transaction_type=\"SELL\")\n return redirect(\"/\")\n else:\n get_symbols = db.execute(\n \"SELECT symbol FROM History WHERE id = :id GROUP BY symbol HAVING SUM(shares) > 0\", id=session['user_id'])\n if not get_symbols:\n return apology(\"Sorry, could not find valid symbol\")\n else:\n symbols = [symbol['symbol'] for symbol in get_symbols]\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n \n if request.method == \"POST\":\n if not request.form.get('symbol'):\n return apology('must provide symbol')\n \n if not request.form.get('shares'):\n return apology('must provide shares')\n \n symbol = (request.form.get(\"symbol\")).upper()\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n username = row[0]['username']\n \n result = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n if not result:\n return apology('no symbol available')\n \n shares = int(request.form.get('shares'))\n \n if shares <= 0:\n return apology('shares not positive')\n \n row = db.execute(\"SELECT * FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n old_shares = row[0]['shares']\n \n if shares > old_shares:\n return apology('number exceeds available shares')\n \n new_shares = old_shares - shares\n \n if new_shares == 0:\n db.execute(\"DELETE FROM portfolio WHERE symbol=:symbol AND username=:username\", symbol=symbol, username=username)\n else:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE symbol=:symbol AND username=:username\", shares=new_shares, symbol=symbol, username=username)\n \n quote = 
lookup(symbol)\n price = quote['price']\n total_p = price * shares\n \n row = db.execute(\"SELECT * FROM users WHERE id=:id\", id=session['user_id'])\n old_cash = row[0]['cash']\n \n new_cash = old_cash + total_p\n \n db.execute(\"UPDATE users SET cash=:cash WHERE id=:id\", cash=new_cash, id=session['user_id'])\n \n #current_time = time.strftime(time.localtime(\"%H:%M:%S %m/%d/%Y\"))\n current_time = time.asctime( time.localtime(time.time()) )\n db.execute(\"INSERT INTO history (username, time, symbol, shares) VALUES (:username, :time, :symbol, :shares)\", username=username,time=current_time,symbol=symbol,shares=0-shares)\n \n # redirect user to home page\n return redirect(url_for(\"index\"))\n \n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"sell.html\")", "def sell():\n\n if request.method == \"POST\":\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n if not symbol:\n return apology(\"please select a valid symbol\")\n\n target_stock = db.execute(\"SELECT *, sum(shares) FROM transactions WHERE user=:user AND symbol=:symbol\",\n user=user, symbol=symbol)\n print(target_stock)\n if not shares:\n return apology(\"must provide how many shares to sell\")\n\n elif shares > target_stock[0]['sum(shares)'] or shares < 1:\n return apology(\"shares must be more than 0 and less than \" + str(target_stock[0]['shares']))\n\n query = lookup(symbol)\n price = query['price']\n name = query['name']\n cash = entry[0]['cash']\n\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=symbol, name=target_stock[0]['name'], price=price, shares=-int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash+price*shares, id=session['user_id'])\n\n return redirect(url_for(\"index\"))\n\n else:\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n\n return render_template(\"sell.html\", stocks=owned)", "def sell():\n if request.method == \"GET\":\n rows = db.execute(text(\n \"SELECT symbol, sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id GROUP BY symbol\"),\n id=session[\"user_id\"])\n symbols = [row[\"symbol\"] for row in rows if row[\"shares\"]]\n return render_template(\"sell.html\", symbols=symbols,\n symbol=request.args.get(\"symbol\"))\n\n if not request.form.get(\"symbol\"):\n return apology(\"missing symbol\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = db.execute(text(\n \"SELECT sum(shares) as shares FROM transactions \"\n \"WHERE user_id=:id AND symbol=:symbol\"),\n id=session[\"user_id\"],\n symbol=request.form.get(\"symbol\")).fetchone()[\"shares\"]\n requested_shares = int(request.form.get(\"shares\"))\n if requested_shares > owned_shares:\n return apology(\"too many shares\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n db.execute(text(\n \"INSERT INTO transactions (user_id, symbol, shares, price) \"\n \"VALUES (:u, :sy, :sh, :p)\"),\n u=session[\"user_id\"],\n 
sy=request.form.get(\"symbol\"),\n sh=-requested_shares,\n p=quote[\"price\"])\n sell_price = int(request.form.get(\"shares\")) * quote[\"price\"]\n db.execute(text(\"UPDATE users SET cash=cash+:c WHERE id=:id\"),\n c=sell_price,\n id=session[\"user_id\"])\n flash(\"Sold!\")\n return redirect(\"/\")", "def sell(self, amount):\n trades = []\n sell_amount = 0\n precision = pow(10, self.pair.get_base_token().get_decimals() - self.pair.get_quote_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_BUY])):\n offer = self.book[Trade.WAY_BUY][i]\n amount_quote = offer.get_quote_amount()\n amount_base = offer.get_base_amount()\n price = offer.get_price()\n\n if amount_quote >= amount:\n tmp = amount * price * precision\n tmp = int(tmp)\n trade = Trade(self.pair, Trade.WAY_SELL, price, tmp, amount, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n sell_amount = sell_amount + trade.get_amount_base()\n trades.append(trade)\n return trades, int(sell_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue\n '''\n trade = Trade(self.pair, Trade.WAY_SELL, price, amount_base, amount_quote, time.time(), fee_currency=self.pair.get_exchange().get_fee_token())\n amount = amount - amount_quote\n sell_amount = sell_amount + trade.get_amount_base()\n trades = trades + [trade]\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. Low volume or amount to high.\")", "def sell():\n\n if request.method == \"POST\":\n sellstock = request.form.get(\"symbol\")\n sellq = int(request.form.get(\"shares\"))\n if sellstock == None:\n return apology(\"Please select a stock symbol to sell.\")\n if sellq < 0:\n return apology(\"Please enter a valid quantity of stocks to sell\")\n invq = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":sellstock})[0][\"quantity\"]\n if sellq > invq:\n return apology(\"You don't have enough shares.\")\n stock = lookup(sellstock)\n cost = round(sellq*stock[\"price\"], 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":sellstock,\"va\":stock[\"price\"],\"qu\":sellq,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":(invq-sellq),\"uid\":session[\"user_id\"],\"sy\":sellstock})\n db.execute(\"UPDATE users SET cash = cash + :cash WHERE id =:uid\", {\"cash\":cost,\"uid\":session[\"user_id\"]})\n flash(\"Shares successfully sold!\")\n return redirect(\"/\")\n inventory = db.execute(\"SELECT symbol FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n return render_template(\"sell.html\", context = inventory)", "def sellshares():\n # Initialise buy and sell share forms\n sellform = SellShareForm()\n # Validate and process form data\n if(sellform.validate_on_submit()):\n # Buys shares\n issuerID = sellform.sellsharecode.data\n quantity = sellform.sellquantity.data\n userID = current_user.userID\n # Call buyshare API\n sellshare = gdb.sellshare(userID, issuerID, quantity)\n if(sellshare):\n # Flash with success message\n flash(\"Share sale successful!\", category=\"success\")\n else:\n # Flash with warning message\n flash(\"Share sale unsuccessful!\", category=\"error\")\n # Redirect to reffering page or dashboard\n return redirect(request.referrer or url_for('main.dashboard'))", "def sell():\n\n if 
request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to sell\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to sell\", 400)\n\n rows = db.execute(\"SELECT id, symbol, numshares FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1:\n return apology(\"You do not have shares of \" + symbol, 400)\n if num_shares > rows[0][\"numshares\"]:\n return apology(\"You cannot sell more shares than you have\", 400)\n\n sale_value = num_shares * company_quote[\"price\"]\n\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n new_balance = balance + sale_value\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares*-1, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n new_numshares = rows[0][\"numshares\"] - num_shares\n new_totalvalue = rows[0][\"totalvalue\"] - sale_value\n return_val = db.execute(\"UPDATE totalshares SET numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 403)\n else:\n rows = db.execute(\"SELECT symbol, numshares FROM totalshares WHERE id = :id\", id=session[\"user_id\"])\n symbol_options = []\n if rows != None and len(rows) > 0:\n for row in rows:\n if row[\"numshares\"] > 0:\n symbol_options.append(row[\"symbol\"])\n return render_template(\"sell.html\", symbol_options=symbol_options)", "async def sell(self, ctx, amount : float, symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(amount, symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' 
% \n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def sell():\n return apology(\"TODO\")\n if request.method == \"POST\":\n # Ensure symbol was submitted\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"must provide symbol\", 403)\n symbol = symbol.upper()\n\n # Ensure number of shares was submitted\n shares = request.form.get(\"shares\")\n if not shares:\n return apology(\"must provide shares\", 403)\n\n return render_template(\"sell.html\")", "def sell():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n units_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE id = :current_id AND stock = :stock_code\",\n current_id=session[\"user_id\"], stock_code=request.form.get(\"symbol\"))\n available_units = units_list[0][\"SUM(units)\"]\n\n if available_units < int(request.form.get(\"shares\")):\n return apology(\"no units bro\", 400)\n\n new_cash = available_money + total_price\n\n updating = db.execute(\"UPDATE users SET cash = :upd_cash WHERE id = :current_id\",\n upd_cash=new_cash, current_id=session[\"user_id\"])\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"S\")\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"sell_result.html\", shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(new_cash))\n else:\n available_stocks_info = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_stocks_list = []\n for element in available_stocks_info:\n if element[\"stock\"] not in available_stocks_list:\n available_stocks_list.append(element[\"stock\"])\n\n return render_template(\"sell.html\", available_stocks=available_stocks_list)", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\")\n elif not request.form.get(\"sharesnumber\"):\n return apology(\"must provide no of shares to sell\")\n elif '.' 
in request.form.get(\"sharesnumber\"):\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not request.form.get(\"sharesnumber\").isdigit():\n return apology(\"No of shares is positive integer Invalid!!\")\n elif not int(request.form.get(\"sharesnumber\")) > 0:\n return apology(\"No of shares is positive value Invalid!!\")\n \n result_dict = lookup(request.form.get(\"symbol\"))\n \n if result_dict == None:\n return apology(\"Symbol does not exist\")\n \n \n #Check No of Shares\n no_of_shares = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol = :symbol\",id=session[\"user_id\"],symbol =request.form.get(\"symbol\"))\n no_of_shares = int(no_of_shares[0]['shares'])\n if int(request.form.get(\"sharesnumber\")) > no_of_shares:\n return apology(\"Sorry!! Don't Have Enough shares\")\n \n result_cash = db.execute(\"SELECT * from users where id = :id\",id=session[\"user_id\"])\n net_cash = result_cash[0][\"cash\"]\n net_worth = int(request.form.get(\"sharesnumber\")) * result_dict['price']\n \n \n \n #Update Cash\n net_cash = net_cash + net_worth\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",cash= net_cash,id=session[\"user_id\"])\n \n #Update History Tables\n \n db.execute(\"INSERT INTO history(user_id,symbol,price,shares) VALUES(:id,:symbol,:price,:shares) \",id=session[\"user_id\"],symbol=result_dict['symbol'],price=result_dict['price'],shares=(-1)*int(request.form.get(\"sharesnumber\")))\n \n #Check Whether user has shares for same symbol\n rows = db.execute(\"SELECT * FROM netshares WHERE user_id = :id AND symbol=:symbol\",id=session[\"user_id\"],symbol=result_dict['symbol'])\n #Update NetShares Table\n if len(rows) == 0:\n db.execute(\"INSERT INTO netshares(user_id,symbol,shares) VALUES(:id,:symbol,:shares)\",id=session[\"user_id\"],symbol=result_dict['symbol'],shares=request.form.get(\"sharesnumber\"))\n else:\n db.execute(\"UPDATE netshares SET shares=:shares WHERE user_id = :id AND symbol=:symbol\",shares= -int(request.form.get(\"sharesnumber\"))+int(rows[0]['shares']),id=session[\"user_id\"],symbol=result_dict['symbol'])\n return redirect(url_for(\"index\"))\n \n else:\n return render_template(\"sell.html\")\n #return apology(\"TODO\")", "def sell():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"must select a stock\", 400)\n elif not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 400)\n elif float(request.form.get(\"shares\")) <= 0:\n return apology(\"number of shares must be greater than one\", 400)\n elif float(request.form.get(\"shares\")) > db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"]:\n return apology(\"you don't own enough shares\", 400)\n\n numberOfShares = float(request.form.get(\"shares\"))\n\n priceOfEachShare = db.execute(\"SELECT price FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\",\n userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"price\"]\n\n totalValue = numberOfShares * priceOfEachShare\n\n db.execute(\"UPDATE users SET cash = cash + {0} WHERE id=:userId\".format(totalValue), userId=session[\"user_id\"])\n\n db.execute(\"UPDATE portfolio SET number = number - {0} WHERE username=:username AND 
symbol=:symbol\".format(request.form.get(\"shares\")),\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n if db.execute(\"SELECT number FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))[0][\"number\"] == 0:\n db.execute(\"DELETE FROM portfolio WHERE username=:username AND symbol=:symbol\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"))\n\n db.execute(\"INSERT INTO history (username, symbol, buyorsell, number, price, date) VALUES(:username, :symbol, :buyorsell, :number, :price, :date)\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"],\n symbol=request.form.get(\"symbol\"), buyorsell=0, number=float(request.form.get(\"shares\")),\n price=priceOfEachShare, date=datetime.datetime.utcnow())\n\n return redirect(\"/\")\n\n else:\n symbolsList = db.execute(\"SELECT symbol FROM portfolio WHERE username=:username\",\n username=db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"])\n return render_template(\"sell.html\", stocks=symbolsList)", "def attempt_market_sell(self, decision: Decision, state: Series, is_backtest: bool = False, crypto: bool = False) -> Transaction:\n # Currently, selling will only support closing out our entire position\n # TODO: support partial sells in the future\n share_quantity = decision.quantity\n try: latest_price = self.latest_price(decision.symbol, state, is_backtest, crypto, 'sell')\n except: return Transaction(False, TransactionType.MarketSell, 0, 0, decision, state['date'])\n\n strike_price: float\n succeeded = True\n if is_backtest:\n c_type = 'crypto' if crypto else 'stock'\n spread = .01 if c_type == 'stock' else 0\n sell_fee = state['close'] * self.get_fee_pct(c_type)[1] + self.get_fixed_fee(c_type, state['close'], share_quantity)\n self.total_fees += sell_fee\n self.trade_volume_shares += share_quantity\n print(f'sell fee: {sell_fee} | trade volume: {self.trade_volume} | total fees: {self.total_fees}')\n strike_price = state['close'] - sell_fee - spread\n else:\n # TODO: Communicate with market here\n try:\n if crypto:\n print('attempting crypto market sell @ ', latest_price)\n (strike_price, share_quantity, succeeded) = asyncio.get_event_loop().run_until_complete(wait_for_cb_order_fill(self.cb_client, decision.contract, 'sell', share_quantity, latest_price))\n else:\n print('attempting ib market sell @ ', latest_price)\n # sell_order = MarketOrder('SELL', share_quantity)\n sell_order = LimitOrder('SELL', share_quantity, latest_price)\n (strike_price, share_quantity, succeeded) = asyncio.get_event_loop().run_until_complete(wait_for_ib_order_fill(self.ib_client.ib, sell_order, decision.contract))\n\n\n except Exception as e: # Failed to sell at limit price\n succeeded = False\n strike_price = 0\n share_quantity = 0\n \n self.trade_volume += (strike_price * share_quantity)\n return Transaction(succeeded, TransactionType.MarketSell, strike_price, share_quantity, decision, state['date'])", "def sell():\n\n table = db.execute(\"SELECT symbol FROM portfolio WHERE id=:id\", id=session[\"user_id\"])\n symbols = []\n for i in range(len(table)):\n 
symbols.append(table[i][\"symbol\"])\n\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n elif not request.form.get(\"shares\"):\n return apology(\"missing shares\", 400)\n\n owned_shares = int(db.execute(\"SELECT shares FROM portfolio where id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))[0][\"shares\"])\n\n if owned_shares < int(request.form.get(\"shares\")):\n return apology(\"Too many shares\", 400)\n\n updated_shares = owned_shares - int(request.form.get(\"shares\"))\n\n # update shares in portfolio\n if updated_shares > 0:\n db.execute(\"UPDATE portfolio SET shares=:shares WHERE id=:id AND symbol=:symbol\",\n shares=updated_shares, id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n else:\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\",\n id=session[\"user_id\"], symbol=request.form.get(\"symbol\"))\n\n # update cash in database\n quote = lookup(request.form.get(\"symbol\"))\n amount = quote[\"price\"] * float(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash = cash + :amount WHERE id=:id\", amount=amount, id=session[\"user_id\"])\n\n db.execute(\"INSERT INTO histories (symbol, shares, price, id) VALUES(:symbol, :shares, :price, :id)\",\n symbol=quote[\"symbol\"], shares=0-int(request.form.get(\"shares\")), price=usd(quote[\"price\"]), id=session[\"user_id\"])\n\n flash(\"Sold!\")\n return redirect(\"/\")\n\n else:\n return render_template(\"sell.html\", symbols=symbols)", "async def sell(self, ctx, quantity: int, symbol: str):\r\n symbol = symbol.upper()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n await self.market_open_check(ctx)\r\n await self.stock_symbol_check(ctx, db, symbol)\r\n \r\n inventory = self.iex.get_held_stock_quantity(db, company.id, symbol)\r\n if inventory < quantity:\r\n await ctx.send(f\"``{company.name}\\n{inventory} {symbol}``\")\r\n raise StonksError()\r\n\r\n price = self.iex.price(symbol)\r\n value = price * quantity\r\n self.iex.sell(db, company.id, symbol, quantity, price)\r\n await ctx.send(f\"``+{value} {company.name} ⯬ {quantity} {symbol} @ {price}``\")", "def sell():\n username = session.get(\"username\")\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n req_quantity = request.form.get(\"shares\")\n if not req_quantity.isdigit() or int(req_quantity)<=0:\n return apology(\"Quantity must be positive integer\", 400)\n req_quantity = int(req_quantity)\n status = \"sold\"\n\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n owned_stock = db.execute(\"SELECT SUM(quantity) FROM history WHERE username=:username GROUP BY stock_symbol HAVING stock_symbol=:symbol\",\n username=username, symbol=symbol)\n if owned_stock:\n owned_quantity = owned_stock[0][\"SUM(quantity)\"]\n stock = lookup(symbol)\n price = stock[\"price\"]\n name = stock[\"name\"]\n else:\n owned_quantity = 0\n if owned_quantity>=req_quantity:\n total_value = req_quantity * price\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :symbol, :price, :time, :quantity, :name, :status)\",\n username=username, symbol=symbol, price=price, time=time, quantity=-req_quantity, name=name, status=status)\n db.execute(\"UPDATE users SET cash = cash+:total_value WHERE username=:username\",\n total_value=total_value, 
username=username)\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n message = f\"Recorded sold {req_quantity} share(s) of {name} total {usd(total_value)}, your new cash balance is {usd(cash)}\"\n return render_template(\"sell.html\", message = message)\n else:\n return apology(\"Insufficient shares\", 400)\n # if db.execute()\n else:\n stock_options = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n stock_options = [s[\"stock_symbol\"] for s in stock_options]\n\n # print(f\"Stock options: {stock_options}\")\n return render_template(\"sell.html\", options = stock_options)", "def sell():\n userId = session[\"user_id\"]\n\n sharesOwned = db.execute(f\"SELECT symbol, SUM(shares) FROM transactions WHERE user_id={userId} GROUP BY symbol HAVING SUM(shares)>0\")\n\n if request.method == \"GET\":\n\n return render_template(\"sell.html\", sharesOwned=sharesOwned)\n\n elif request.method == \"POST\":\n\n symbolInput = request.form.get(\"symbol\")\n shares = float(request.form.get(\"shares\")) * (-1)\n\n symbolName = lookup(symbolInput)[\"name\"]\n symbolPrice = lookup(symbolInput)[\"price\"]\n symbolTicker = lookup(symbolInput)[\"symbol\"]\n\n shareCount = float(db.execute(f\"SELECT SUM(shares) FROM transactions WHERE user_id={userId} AND symbol='{symbolInput}' GROUP BY symbol HAVING SUM(shares)>0\")[0][\"SUM(shares)\"] * (-1))\n\n if symbolInput != symbolTicker or symbolInput == \"\" or shares == \"\" or shares > 0 or shares < shareCount:\n return apology(\"No sell for you senpai!\")\n\n else:\n totalPrice = shares * symbolPrice\n availableCash = float(db.execute(f\"SELECT cash FROM users WHERE id={userId}\")[0][\"cash\"])\n\n now = datetime.now()\n transTime = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n availableCash -= totalPrice\n\n db.execute(f\"UPDATE users SET cash = '{availableCash}' WHERE id = '{userId}'\")\n\n db.execute(f\"INSERT INTO transactions (trans_time, trans_type, user_id, symbol, price, shares, value, name, current_price) VALUES ('{transTime}','SELL','{userId}','{symbolTicker}','{symbolPrice}','{shares}','{totalPrice}','{symbolName}','{symbolPrice}')\")\n\n return redirect(\"/\")", "def sell():\n if request.method == \"GET\":\n symbols = Records.query.with_entities(Records.symbol).\\\n distinct().filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"sell.html\", symbols=symbols)\n\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n record = db.session.query(db.func.sum(Records.shares).label(\"shares\")).\\\n group_by(Records.user_id).filter_by(symbol=symbol, user_id=session.get('user_id')).one()\n\n if shares > record.shares:\n return apology(f\"You can only sell { record.shares } shares or less than\", 400)\n\n quote = lookup(symbol)\n price = quote['price']\n value = round(shares * price, 2)\n\n user = Users.query.get(session.get('user_id'))\n user.cash += value\n\n record = Records(symbol=quote['symbol'], company_name=quote['name'],\n transact_type=\"sell\", shares=int('-'+str(shares)),\n price=price, user_id=user.id)\n\n db.session.add(record)\n db.session.commit()\n\n flash('Sold')\n return redirect(url_for('index'))", "def sell():\n userid = session[\"user_id\"]\n if request.method == \"GET\":\n symbol = db.execute(\"SELECT symbol FROM purchase WHERE id=:uid\",uid=userid)\n # print(symbol)\n symbols = []\n for s in symbol:\n temp = s[\"symbol\"]\n symbols.append(temp)\n # print(symbols)\n return 
render_template(\"sell.html\", symbols=symbols)\n else:\n symbol_entry = request.form.get(\"symbol\")\n shares_entry = int(request.form.get(\"shares\"))\n if not symbol_entry or not shares_entry:\n return apology(\"Please select both symbol and shares\")\n\n data = db.execute(\"SELECT symbol, shares FROM purchase WHERE id=:uid\",uid=userid)\n share_check = 0\n\n for s in data:\n if(s[\"symbol\"] == symbol_entry):\n share_check = s[\"shares\"]\n # print(share_check)\n if shares_entry > share_check:\n return apology(\"You don't have this many shares of this company\")\n\n current_cash = (db.execute(\"SELECT cash FROM users WHERE id=:uid\", uid=userid))[0].get(\"cash\")\n query = lookup(symbol_entry)\n share_price = query[\"price\"]\n sold_price = share_price * shares_entry\n\n db.execute(\"UPDATE users SET cash=:sold WHERE id=:uid\",sold=sold_price+current_cash, uid=userid)\n if shares_entry == share_check:\n db.execute(\"DELETE FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol_entry, uid=userid)\n else:\n db.execute(\"UPDATE purchase SET shares=:shares WHERE symbol=:symbol AND id=:uid\",shares=share_check-shares_entry,symbol=symbol_entry, uid=userid)\n\n nshare = -shares_entry\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol_entry,shares=nshare,price=share_price, time=dt)\n return render_template(\"sell.html\", message=\"Sold!\")\n print(data)", "def _sell(self, amount, price):\n params = {\"pair\": self.pair, \"type\" : \"sell\", \"rate\" : price, \"amount\" : amount}\n response = self._send_request(\"Trade\", params)\n if \"error\" in response:\n raise TradeException(response[\"error\"])", "def sell():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure positive number of shares was submitted\n elif not request.form.get(\"shares\") or int(request.form.get(\"shares\")) < 0:\n return apology(\"must provide positive number of shares\", 403)\n\n elif int(request.form.get(\"shares\")) > (db.execute(\"SELECT sum(shares) as shares FROM 'transaction' WHERE u_id = :user_id and symbol = :symbol\", user_id = session[\"user_id\"], symbol = request.form.get(\"symbol\")))[0][\"shares\"]:\n return apology(\"cannot sell more shares than owned\", 403)\n\n else:\n returned_quote = lookup(request.form.get(\"symbol\"))\n row = db.execute(\"SELECT * FROM users WHERE id = :user_id\", user_id = session[\"user_id\"])\n\n db.execute(\"INSERT INTO 'transaction' ('t_id','u_id','symbol','shares','price') VALUES (NULL,:u_id,:symbol,:shares,:price)\",\n u_id = session[\"user_id\"], symbol = returned_quote[\"symbol\"], shares = -1*int(request.form.get(\"shares\")), price = returned_quote[\"price\"])\n db.execute(\"UPDATE users SET cash = cash + :price * :shares WHERE id = :user_id\",\n price = returned_quote[\"price\"], shares = int(request.form.get(\"shares\")), user_id = session[\"user_id\"])\n\n flash(\"Sold\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n rows = db.execute(\"SELECT symbol, SUM(shares) as shares FROM 'transaction' WHERE u_id = :user_id GROUP BY symbol\", user_id = session[\"user_id\"])\n\n if len(rows) > 0:\n return render_template(\"sell.html\", rows = 
rows)\n else:\n return apology(\"no shares to sell\", 403)", "def sell():\n if request.method == \"POST\":\n dict=lookup(request.form.get(\"symbol\"))\n if not request.form.get(\"symbol\") or not request.form.get(\"shares\") or not lookup(request.form.get(\"symbol\")):\n return apology(\"Must provide valid symbol and positive integer\",400)\n else:\n row=db.execute(\"SELECT *FROM portofolio WHERE symbol=:s AND user_id=:u_i\",s=request.form.get(\"symbol\"),u_i=session[\"user_id\"])\n if len(row) == 0 or int(request.form.get(\"shares\")) > row[0][\"shares\"]:\n return apology(\"you don't have enough shares of this company\",400)\n else:\n db.execute(\"INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)\",s=dict[\"symbol\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=time.asctime( time.localtime(time.time())),u_i=session[\"user_id\"],status='sold')\n db.execute(\"UPDATE portofolio SET shares =shares-:sh, price=:p, total=total-:t WHERE symbol=:s AND user_id=:u_i\",sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=dict[\"price\"] * int(request.form.get(\"shares\")),s=dict[\"symbol\"],u_i=session[\"user_id\"])\n db.execute(\"UPDATE users SET cash=cash+:extra WHERE id=:i\",extra=int(request.form.get(\"shares\")) * dict[\"price\"],i=session[\"user_id\"])\n db.execute(\"DELETE FROM portofolio WHERE shares=0\")\n return redirect(\"/\")\n else:\n rows=db.execute(\"SELECT *FROM portofolio where user_id=:u_i \",u_i=session[\"user_id\"])\n arr=[]\n for row in rows:\n arr.append(row['symbol'])\n return render_template(\"selling.html\",arr=arr)", "async def buy(self, ctx, name, shares: int):\n\t\tplural = 's' if shares != 1 else ''\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tif shares < 1:\n\t\t\tawait ctx.send('You cannot buy less than one share.')\n\t\t\treturn\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\ttry:\n\t\t\tbal = await bank.withdraw_credits(ctx.author, shares * price)\n\t\texcept ValueError:\n\t\t\tbal = await bank.get_balance(ctx.author)\n\t\t\tawait ctx.send(\n\t\t\t\tf'You cannot afford {shares} share{plural} of {name}. '\n\t\t\t\tf'It would cost {price * shares} {currency} ({price} {currency} each). 
'\n\t\t\t\tf'You only have {bal} {currency}.'\n\t\t\t)\n\t\t\treturn\n\t\tasync with self.config.user(ctx.author).stocks() as user_stocks:\n\t\t\tif name in user_stocks:\n\t\t\t\tuser_stocks[name]['count'] += shares\n\t\t\telse:\n\t\t\t\tuser_stocks[name] = {'count': shares, 'total_count': stock_data[name]['total_count']}\n\t\tawait ctx.send(\n\t\t\tf'You purchased {shares} share{plural} of {name} for {price * shares} {currency} '\n\t\t\tf'({price} {currency} each).\\nYou now have {bal} {currency}.'\n\t\t)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n sharesToSell = int(request.form.get(\"shares\"))\n if sharesToSell < 0:\n return apology(\"Shares to sell cannot be negative\", 400)\n\n sharesRows = db.execute(\"SELECT * FROM portfolio WHERE UserID = :userid AND Symbol = :enteredSymbol\",\n userid=session.get(\"user_id\"), enteredSymbol = symbol)\n\n numSharesOwned = 0\n for row in sharesRows:\n numSharesOwned += row[\"NumberOfShares\"]\n\n if numSharesOwned < sharesToSell:\n return apology(\"You don't own that many shares!\", 400)\n\n remainingSharesToSell = sharesToSell\n for row in sharesRows:\n numShares = row[\"NumberOfShares\"]\n if remainingSharesToSell >= numShares:\n '''delete row'''\n delete = db.execute(\"DELETE FROM portfolio WHERE id = :rowid\", rowid = row[\"id\"])\n remainingSharesToSell -= numShares\n else:\n '''update row'''\n updatedShares = numShares - remainingSharesToSell\n update = db.execute(\"UPDATE portfolio SET NumberOfShares = :numshares, TotalPrice = :tp WHERE id = :rowid\",\n numshares = updatedShares, tp = updatedShares * row[\"UnitPrice\"], rowid = row[\"id\"])\n remainingSharesToSell = 0\n\n if remainingSharesToSell == 0:\n break;\n\n quote = lookup(symbol)\n cashToReturn = quote[\"price\"] * sharesToSell\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session.get(\"user_id\"))\n usersCurrentCash = userRows[0][\"cash\"]\n\n updatedBalance = usersCurrentCash + cashToReturn\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash = updatedBalance, userid = session.get(\"user_id\"))\n '''Update history'''\n dateNow = datetime.datetime.now()\n db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbl, :shares, :price, :date, :userid)\", symbl = symbol, shares = -1 * sharesToSell, price = -1 * cashToReturn, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n else:\n symbolRows = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n symbls = []\n for row in symbolRows:\n symbls.append(row[\"Symbol\"])\n\n return render_template(\"sell.html\", symbols=symbls)", "def sell():\n if request.method == \"POST\":\n current_user = session[\"user_id\"]\n\n\n if not request.form.get(\"sell_amount\"):\n return apology(\"Must provide a number to sell\", 403)\n\n stock_to_sell= request.form.get(\"stock_to_sell\")\n sell_amount= int(request.form.get(\"sell_amount\"))\n\n current_stocks = db.execute(\"SELECT volume FROM portfolio WHERE id = :id AND stock_symbol=:stock_symbol\", id=current_user, stock_symbol=stock_to_sell)\n # current_stocks=db.execute(\"SELECT volume FROM portfolio WHERE id= :id AND stock_symbol= :stock_symbol\", id=current_user, stock_symbol=stock_to_sell)\n\n\n\n if not current_stocks:\n return apology(\"You do not own any stocks, try refreshing the sell page\")\n\n current_volume = current_stocks[0][\"volume\"]\n current_volume 
= int(current_volume)\n\n if current_volume < int(request.form.get(\"sell_amount\")):\n return apology(\"Attempting to sell more shares than you own\", 403)\n\n lookedup=[]\n lookedup=lookup(request.form.get(\"stock_to_sell\"))\n if not lookedup:\n return apology(\"Unable to lookup stock info.\")\n\n stock_name = lookedup.get(\"name\")\n stock_price = lookedup.get(\"price\")\n stock_symbol = lookedup.get(\"symbol\")\n\n\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=current_user)\n # see if properly selecting cash amount\n if not user_cash:\n return apology(\"Didn't find user's current balance\", 000)\n # update user total cash\n current_cash = user_cash[0][\"cash\"]\n current_cash = int(current_cash)\n total_revenue = sell_amount * stock_price\n new_balance = current_cash + total_revenue\n db.execute(\"UPDATE users SET cash = :new_balance WHERE id = :id\", new_balance=new_balance, id=current_user)\n\n # update portfolio\n new_volume=0\n new_volume=current_volume-sell_amount\n db.execute(\"UPDATE portfolio SET volume = :new_volume WHERE id = :id AND stock_symbol = :stock_symbol\", new_volume=new_volume, id=current_user, stock_symbol=stock_symbol)\n\n # update sales database\n db.execute(\"INSERT INTO sales (id,stock_symbol,volume_sold,price,date_sold) VALUES(:id,:symbol,:amount,:price,datetime('now'))\", id=current_user, symbol=stock_symbol, amount=sell_amount, price=stock_price)\n\n\n return render_template(\"sold.html\",stock_name=stock_name, stock_price=stock_price, stock_symbol=stock_symbol,shares_to_sell=sell_amount, total_value=total_revenue)\n\n\n else:\n current_user = session[\"user_id\"]\n current_stocks=db.execute(\"SELECT stock_symbol, volume FROM portfolio WHERE id = :id\", id=current_user)\n if not current_stocks:\n return apology(\"You do not own any stocks\")\n return render_template(\"sell.html\",current_stocks=current_stocks)\n # return apology(\"i suck at selling?\")", "def sell():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Access form data\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n # Ensure symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n if not shares:\n return apology(\"must provide shares\", 400)\n\n # Obtain quote using lookup function\n QUOTED = lookup(symbol)\n\n # Check if user has enough shares to sell as requested\n shares_count = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n if shares > shares_count:\n return apology(\"not enough shares owned\", 400)\n\n # User has enough shares to sell as requested\n else:\n # Calculate new cash amount user has\n cash = db.execute(\"SELECT cash FROM users WHERE id = ?\", user_id)[0][\"cash\"]\n cash_gained = QUOTED[\"price\"] * shares\n new_cash_total = cash + cash_gained\n\n # Update cash in users table for user\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", new_cash_total, user_id)\n\n # Insert sell log into history table\n db.execute(\"INSERT INTO history (user_id, symbol, shares, price, transacted) VALUES (?, ?, ?, ?, datetime('now'))\",\n user_id, QUOTED[\"symbol\"], -(shares), QUOTED[\"price\"])\n\n # Keep track of shares in shares table\n current_shares = db.execute(\"SELECT shares_count FROM shares WHERE user_id = ? 
AND symbol = ?\",\n user_id, QUOTED[\"symbol\"])[0][\"shares_count\"]\n new_shares_total = current_shares - shares\n\n # If 0 shares left of the stock owned\n if new_shares_total == 0:\n db.execute(\"DELETE FROM shares WHERE user_id = ? AND symbol = ?\", user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User still owns shares of the stock\n else:\n shares_value_total = new_shares_total * QUOTED[\"price\"]\n db.execute(\"UPDATE shares SET shares_count = ?, price = ?, total = ? WHERE user_id = ? AND symbol = ?\",\n new_shares_total, QUOTED[\"price\"], shares_value_total, user_id, QUOTED[\"symbol\"])\n\n # Redirect user to home page\n flash(\"Sold!\", \"info\")\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n # Select share symbols from shares table for logged in user\n SHARES = db.execute(\"SELECT symbol FROM shares WHERE user_id = ?\", user_id)\n\n return render_template(\"sell.html\", shares=SHARES)", "def __sell(self, order, portfolio):\n amount = order.price * order.volume\n portfolio.remove_stock(order.symbol, order.volume)\n portfolio.add_cash(amount)\n return True", "def sell():\n\n if request.method == \"POST\":\n\n # define stock variables\n symbol = request.form.get(\"symbol\")\n stock = lookup(request.form.get(\"symbol\"))\n\n # error checking\n if not stock:\n return apology(\"Missing or Incorrect Symbol\", 400)\n\n # check if stock is owned\n try:\n sold_stock = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id AND symbol = :symbol GROUP BY symbol\", user_id=session[\"user_id\"], symbol=symbol)[0]\n except IndexError:\n return apology(\"Stock not owned\", 400)\n\n # check for shares input\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Input at least 1 share\", 400)\n\n if shares < 0:\n return apology(\"Input at least 1 Share\", 400)\n\n if int(sold_stock[\"shares\"]) < shares:\n return apology(\"Not enough shares to sell\", 400)\n\n else:\n # define variables for inserting into transactions table and updating cash\n purchase_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # update user cash\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n user_cash = user_cash + (stock[\"price\"]*shares)\n db.execute(\"UPDATE users SET cash = :user_cash WHERE id = :user_id\", user_id=session[\"user_id\"], user_cash=user_cash)\n\n # update transactions table with selling transaction\n db.execute(\"\"\"\n INSERT INTO transactions(user_id, date, symbol, shares, price)\n VALUES(:user_id, :date, :symbol, :shares, :price)\n \"\"\",\n user_id=session[\"user_id\"],\n date=purchase_date,\n symbol=stock[\"symbol\"],\n shares=-shares,\n price=stock[\"price\"]\n )\n\n flash(\"You paper-handed that one!\")\n return redirect(\"/\")\n\n else:\n # query db for current holdings\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n stocks[:] = [stock for stock in stocks if stock.get('shares') > 0]\n return render_template(\"sell.html\", stocks=stocks)", "def sell_btc(self, qty):\n url = self.base_url + 'sells'\n request_data = {\n \"qty\": qty,\n }\n body = json.dumps(request_data)\n self.session.headers.update(self.sign(url, body=body))\n self.session.headers.update(request_data)\n resp = 
self.session.post(url=url, data=body)\n return resp.json()", "def sell(self, date_idx: int, cash_balance: float, buy_budget: float) -> float:\n todays_price: float = self.price_history.iat[date_idx, 1]\n sell_value: float = self.shares * todays_price\n new_cash_balance: float = cash_balance + sell_value\n profit_or_loss = sell_value - buy_budget\n if Helpers.is_verbose_on():\n if profit_or_loss >= 0:\n text_color: str = 'green'\n else:\n text_color = 'red'\n cprint(f\"{self.ticker}: sell {self.shares:.2f} shares at {todays_price:.2f} \"\n f\"for ${sell_value:.2f} on date {date_idx}. Cash balance: {new_cash_balance:.2f}\",\n text_color)\n self.shares = 0\n self.last_sell_date_idx = date_idx\n return new_cash_balance", "def sell_to_close(self, symbol, date, price):\n\n # Exit the position\n positions_by_symbol = self.active_positions_by_symbol\n position = positions_by_symbol[symbol]\n position.exit(date, price)\n\n # Receive the cash\n sale_value = position.last_value * (1 - self.percent_slippage)\n self.cash += sale_value\n self.portfolio_history.record_cash(date, self.cash)\n\n # Record in portfolio history\n self.portfolio_history.add_to_history(position)\n del positions_by_symbol[symbol]", "def sell():\n\n rows = db.execute('SELECT symbol, shares FROM transactions WHERE id = :id', id=session['user_id'])\n\n # Generate a list of stock's symbols owned by the current user\n stocks = {stock[\"symbol\"]: stock[\"shares\"] for stock in rows}\n\n # User reached route via POST\n if request.method == 'POST':\n\n if not request.form.get('symbol'):\n return apology('must provide symbol', 403)\n\n elif request.form.get('symbol') not in stocks:\n return apology(\"you don't own any stock of this company\")\n\n try:\n if int(request.form.get('shares')) < 1:\n return apology('must prove a positive number of stocks')\n\n elif int(request.form.get('shares')) > stocks[request.form.get('symbol')]:\n return apology(\"you don't own that shares\")\n except ValueError:\n return apology(\"input isn't an integer\", 403)\n\n stock_price = lookup(request.form.get('symbol'))['price']\n\n db.execute('INSERT INTO transactions (id, operation, symbol, shares, price) VALUES(:id, :operation, :symbol, :shares, :price)',\n id=session['user_id'],\n operation='SELL',\n symbol=request.form.get('symbol'),\n shares=request.form.get('shares'),\n price=stock_price\n )\n\n db.execute('UPDATE users SET cash = cash + :y WHERE id = :id',\n y=stock_price * int(request.form.get('shares')),\n id=session['user_id']\n )\n\n return redirect('/')\n\n # User reached route via GET\n else:\n return render_template('sell.html', stocks=stocks)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n price = lookup(symbol)['price']\n\n if not request.form.get('ammount').isnumeric() or int(request.form.get('ammount')) % 100 != 0:\n return apology(\"The ammount is not a valid number, should be a multiple of 100\", 501)\n\n ammount = int(request.form.get('ammount'))\n cost = price * ammount\n current_stock = db.execute(\"SELECT * FROM stocks WHERE user_id = ? 
AND symbol = ?\", session[\"user_id\"], symbol)\n current_cash = db.execute(\"SELECT * FROM users WHERE id = ?\", session[\"user_id\"])\n\n if ammount > current_stock[0][\"ammount\"] or len(current_stock) == 0:\n return apology(\"Your stocks are not that high!\", 501)\n else:\n update_database(session[\"user_id\"], symbol, ammount, price, \"sell\", current_stock[0], current_cash[0])\n \n return redirect(\"/\")\n\n return render_template(\"sell.html\")", "def sell():\n\n if request.method == \"POST\":\n\n # get share symbol from form\n symb = request.form.get(\"symbol\")\n\n # retrieve stock price, symbol and stock name via lookup function (returns dict object)\n quote = lookup(request.form.get(\"symbol\"))\n if not quote:\n return apology(\"Lookup failed\", 400)\n\n # retrieve number of shares to sell as an int and convert it to a negative number\n try:\n quant = int(request.form.get(\"shares\"))\n except ValueError:\n # apologise if not an int\n return apology(\"Invalid quantity\", 400)\n else:\n quant = abs(quant)*-1\n\n # variable to show user's current cash\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n cash = cash[0][\"cash\"]\n\n # check if user owns that particular stock and that they have the same or more quantity\n shares = db.execute(\"SELECT name, SUM(quantity) FROM portfolio WHERE userid = :userid GROUP BY name\", userid=session[\"user_id\"])\n\n for share in shares:\n # if the share is found in the list (the user owns it)\n if share[\"name\"] == quote[\"name\"]:\n # if the quantity of the shares owned is greater than the quantity the user wants to sell\n if share[\"SUM(quantity)\"] > quant:\n # insert transaction into portfolio table\n db.execute(\"INSERT INTO portfolio (name, userid, price, quantity) VALUES (:name, :userid, :price, :quantity)\",name=quote[\"symbol\"],userid=session[\"user_id\"], price=quote[\"price\"], quantity=quant)\n # update user's cash in the users table\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash+(quant*-1)*quote[\"price\"], id=session[\"user_id\"])\n # return user to index summary page after sell\n return redirect('/')\n # if the quantity of the particualr share is less than the quantity user wants to sell, then apologise\n else:\n apology(\"You don't have that many to sell!\", 400)\n else:\n apology(\"You don't own any of that name\", 400)\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n # get a list of share names that the user owns for the select HTML element\n select = db.execute(\"SELECT name FROM portfolio WHERE userid=:id GROUP BY name\", id=session[\"user_id\"])\n\n return render_template(\"sell.html\", select=select)\n\n return apology(\"Buy failed\", 400)", "def sell():\n if request.method == \"GET\":\n portf = db.execute(\"SELECT * FROM portfolio WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"sell.html\",portfolio = portf)\n else:\n\n quote = lookup(request.form.get('stocklist'))\n print(str(quote))\n # Remove the stock frm user's portfolio\n # taking no of shares provided by user in form\n shares = int(request.form.get(\"no_of_shares\"))\n\n # Taking the price of that share\n\n price = db.execute(\"SELECT price FROM portfolio WHERE symbol=:symbol AND id=:id\", symbol = quote[\"symbol\"], id = session[\"user_id\"])\n\n # totla_price\n total_remove_price = shares * quote[\"price\"]\n # Now updating\n print(total_remove_price)\n # Taking total no of shares from portfolio\n share = db.execute(\"SELECT shares FROM 
portfolio WHERE id=:id AND symbol=:symbol\",symbol = quote[\"symbol\"],\n id = session[\"user_id\"])\n total = db.execute(\"SELECT total FROM portfolio WHERE id=:id AND symbol=:symbol\",symbol = quote[\"symbol\"],\n id = session[\"user_id\"])\n\n # if share provided by user in form is less than or equal to total shares owned then only transaction will processed\n print(share[0][\"shares\"])\n print(shares)\n if (shares < share[0][\"shares\"]):\n # Remove stock and price and no of stocks stocks = stocks - n\n real_total = total[0][\"total\"].split(\"$\")\n\n new_total1 = real_total[1][2:]\n new_total2 = real_total[1][:1]\n yup_final = new_total1 + new_total2\n print(yup_final)\n db.execute(\"UPDATE portfolio set total=:total, shares=:shares WHERE id=:id\", total = float(yup_final) - total_remove_price\n , shares = int(share[0][\"shares\"]) - shares , id=session[\"user_id\"])\n # current selling price = price * stocks and add this to user's cash\n elif (shares == share[0][\"shares\"]):\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\", id = session[\"user_id\"], symbol = quote['symbol'])\n else:\n return apology(\"Unable to process request\", 404)\n return redirect(\"/\")", "def test_sell_shares_success(self):\n\t\texpected_response_code = 200\n\t\tsymbol = \"DDD\" \n\t\tquantity = 100\n\t\t\n\t\tdisplayName, email, password = (\"John Doe\", \"johndoe@test.com\", \"12345678\")\n\t\tregistration_response = ApiFacade.register_user(displayName, email, password)\n\t\tauthentication_response = ApiFacade.authenticate_user(email, password)\n\t\ttoken = authentication_response.get_token()\n\n\t\t# get account id\n\t\tviewdetails_response = ApiFacade.view_details(token)\n\t\taccount_id = viewdetails_response.get_main_account_id()\n\n\t\t# buy shares first\n\t\tbuyshare_response = ApiFacade.buy_share(token, account_id, symbol, int(quantity))\n\t\t\n\t\t# sell the shares\n\t\tsellshare_response = ApiFacade.sell_share(token, account_id, symbol, int(quantity / 3))\n\n\t\tdeletion_response = ApiFacade.delete_user(token)\n\n\t\tself.assertEqual(sellshare_response.get_http_status(), expected_response_code, \n\t\t\tmsg = \"Expected HTTP{0}; got HTTP{1}\"\n\t\t\t.format(expected_response_code, sellshare_response.get_http_status()))", "def get_stock_price(stock):\n pass", "def sell():\n\n # User submits information\n if request.method == \"POST\":\n\n # Ensure user entered a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must choose a stock\")\n\n # Get stock selected\n symbol = request.form.get(\"symbol\")\n \n # Ensure is a valid stock symbol\n if not lookup(symbol):\n return apology(\"Invalid stock symbol\")\n\n # Ensure user owns the stock requested\n test = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n if not test:\n return apology(\"you have 0 shares of this stock\")\n\n owns = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? 
AND stocks = ?\", session[\"user_id\"], symbol)\n\n # Ensure user entered a number in shares\n if not request.form.get(\"shares\") or not isinstance(request.form.get(\"shares\"), int):\n return apology(\"must enter postive whole number of shares\")\n\n shares = request.form.get(\"shares\")\n\n # Ensure number is positive\n if shares <= 0:\n return apology(\"must enter a positive number\")\n\n # Ensure user owns the amount of stock entered to sell\n if shares > owns[0]['shares']:\n return apology(\"you don't own that much of this stock\")\n\n # Get date and time for transaction\n day = datetime.now()\n time = datetime.now().time()\n\n # Get total and stock name for transaction\n price = lookup(symbol)['price']\n total = price * shares\n name = lookup(symbol)['name']\n\n # Sell shares of the stock and add to transactions history\n db.execute(\"INSERT INTO transactions (user_id, date, time, price, shares, total, stock, name, type) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n session[\"user_id\"], day, time, price, shares * -1, total, symbol, name, \"sell\")\n\n # Update portfolios table\n db.execute(\"UPDATE portfolios SET shares = shares - ? WHERE user_id = ? AND stocks = ?\", shares, session[\"user_id\"], symbol)\n\n # If stock shares is 0, delete from portfolio\n db.execute(\"DELETE FROM portfolios WHERE shares = ? \", 0)\n\n return redirect(\"/\")\n\n # If user reached page via link or redirect\n else:\n\n # Get list of stocks owned\n owns = db.execute(\"SELECT stocks FROM portfolios WHERE user_id = ? ORDER BY stocks\", session[\"user_id\"])\n\n return render_template(\"sell.html\", owns=owns)", "def sell(self, currency_pair, rate, amount):\n return self.api_query('sell', {\"currencyPair\": currency_pair, \"rate\": rate, \"amount\": amount})", "def reverse_sell(self, amount):\n trade_amount = 0\n precision = pow(10, self.pair.get_quote_token().get_decimals() - self.pair.get_base_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_BUY])):\n offer = self.book[Trade.WAY_BUY][i]\n amount_quote = offer.get_quote_amount() # GAS\n amount_base = offer.get_base_amount() # NEO\n price = offer.get_price()\n\n if amount_base >= amount:\n if self.pair.get_exchange().get_fee_token():\n trade_amount = trade_amount + amount/price * precision\n else:\n trade_amount = trade_amount + amount/price * precision / (1 - self.pair.get_exchange().get_fees())\n return int(trade_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue\n '''\n trade_amount = trade_amount + amount_quote\n amount = amount - amount_base\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. 
Low volume or amount to high.\")", "def sell(self, ticker, volume):\n if volume <= 0: \n raise errs.VolumeLessThanZeroError\n\n sell_trade = Trade(ticker=ticker, volume=volume, account_id=self.id)\n if trade.get_current_price(ticker) is None:\n raise errs.NoSuchTickerError\n else:\n sell_trade.unit_price = trade.get_current_price(ticker)\n \n decrease_position = Position.from_account_id_and_ticker(account_id=sell_trade.account_id, ticker=sell_trade.ticker)\n if decrease_position.shares < sell_trade.volume:\n raise errs.InsufficientSharesError\n decrease_position.shares -= sell_trade.volume\n decrease_position.save()\n\n sell_trade.volume *= -1 # Differentiates buys/sells with pos/negative volume\n sell_trade.save()", "def sell():\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n else:\n tick = request.form.get(\"ticker\")\n quote = lookup(tick)\n if not quote:\n return apology(\"Ticker does not exist\")\n shares = int(request.form.get(\"shares\"))\n if shares <= 0:\n return apology(\"Please input a valid number of shares\")\n money = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n #if shares < int(money[0][\"shares\"]):\n # return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE users SET cash = cash + :purchase WHERE id = :id\", id=session[\"user_id\"], purchase=(quote[\"price\"] * float(shares)))\n findshares = db.execute(\"SELECT shares FROM purchases WHERE user_id = :id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"])\n \n \n if not findshares:\n return apology(\"You don\"t have those shares >:(\")\n else:\n if int(findshares[0][\"shares\"]) < int(shares):\n return apology(\"You don\"t have those shares >:(\")\n db.execute(\"UPDATE purchases SET shares=:number, total=:total WHERE user_id=:id AND ticker=:ticker\", id=session[\"user_id\"], ticker=quote[\"symbol\"], total=(float(quote[\"price\"])*float(shares)), number=int(findshares[0][\"shares\"]) - int(shares))\n return redirect(url_for(\"index\"))\n\nif __name__ == \"__main__\":", "def sell():\n\n # if user reached route via GET return them an input form\n if request.method == \"GET\":\n return render_template(\"sell.html\")\n\n # if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n\n # get id as it is used many times\n id = session[\"user_id\"]\n\n # get symbol input\n symbol = request.form.get(\"symbol\")\n\n # get share volume requested\n volume = int(request.form.get(\"volume\"))\n\n # ensure stock symbol was submitted\n if not symbol:\n return apology(\"you must provide a stock symbol\")\n\n # ensure positive volume (integer rule handled elsewhere)\n elif volume <= 0:\n return apology(\"volume must be integer greater than 0\")\n\n # lookup stock on yahoo\n stock_info = lookup(symbol)\n\n # if error looking stock up\n if not stock_info:\n return apology(\"that stock symbol doesn't exist\")\n\n # check if user already owns any stock in this company\n existing = db.execute(\"SELECT num_shares FROM portfolio WHERE id = :id AND symbol = :symbol\", id=id, symbol=symbol)\n\n # if sufficient cash, make purchase, else return apology\n if not existing:\n return apology(\"you don't own this stock\")\n else:\n if existing[0]['num_shares'] < volume:\n return apology('you cannot sell more shares than you own')\n else:\n # query database for\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=id)\n cash = cash[0]['cash']\n\n minus = db.execute(\"UPDATE portfolio SET num_shares = 
:num_shares WHERE id = :id AND symbol = :symbol\", num_shares=existing[0]['num_shares'] - volume, id=id, symbol=symbol)\n\n # set date string\n dstring = str(datetime.datetime.utcnow())\n\n # update transaction history\n result2 = db.execute(\"INSERT INTO `transaction` (id, symbol, volume, share_price, dtstamp) VALUES(:id, :symbol, :volume, :share_price, :dtstamp)\", id=id, symbol=symbol, volume=-volume, share_price=stock_info['price'], dtstamp=dstring)\n\n # calculate sale price\n sale_price = stock_info['price'] * volume\n\n # increase cash balance\n result = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=cash+sale_price, id=id)\n\n # redirect user to home page\n return redirect(url_for(\"index\"))", "def sell_stock(self, stock, amount, date=None):\n if date is None:\n date = self.date\n\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n self.order_stock(stock, self.stock_data[stock].position['Position'][date] - amount, date)", "def sell():\n # Moved userID outside of 'if' as could not be accessed in 'else' for html.\n userID = session[\"user_id\"]\n\n if request.method == \"POST\":\n\n user = db.execute(\"SELECT * FROM users WHERE id = :id\", id=userID)\n cash = user[0][\"cash\"]\n\n stock = lookup(request.form.get(\"symbol\"))\n\n numOfShares = float(request.form.get(\"shares\"))\n if not request.form.get(\"symbol\"):\n return apology(\"You haven't typed a symbol\")\n if stock is None:\n return apology(\"This doesn't seem to be a valid symbol, try again\")\n if numOfShares < 0:\n return apology(\"You must state how many shares you want to sell\")\n\n salePrice = stock[\"price\"] * numOfShares\n date_time = datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n\n stockOwned = db.execute(\"SELECT * FROM portfolio WHERE id=:userID AND symbol=:symbol\", userID=userID, symbol=stock[\"symbol\"])\n if not stockOwned:\n return apology(\"You don't own any of this stock\")\n if stockOwned[0][\"numOwned\"] < numOfShares:\n return apology(\"You are trying to sell more shares than you own!\")\n else:\n newNumOwned = float(stockOwned[0][\"numOwned\"]) - numOfShares\n newTotalValue = newNumOwned * stock[\"price\"]\n db.execute(\"UPDATE users SET cash=cash+:salePrice WHERE id=:userID\", salePrice=salePrice, userID=userID)\n db.execute(\"INSERT INTO transactions (id, symbol, num_shares, price_ps, date_time, buy_or_sell) VALUES (:userID, :symbol, :num_shares, :price_ps, :date_time, :buy_or_sell)\",\n userID=userID, symbol=stock[\"symbol\"], num_shares=numOfShares, price_ps=stock[\"price\"], date_time=date_time, buy_or_sell=\"SELL\")\n db.execute(\"UPDATE portfolio SET numOwned=:newNumOwned, totalValue=:newTotalValue WHERE id=:userID AND symbol=:symbol\",\n newNumOwned=newNumOwned, newTotalValue=newTotalValue, userID=userID, symbol=stock[\"symbol\"])\n\n return redirect(\"/\")\n else:\n symbols = db.execute(\"SELECT symbol FROM portfolio WHERE id=:userID\", userID=userID)\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensures symbol was submitted, otherwise return apology\n if not request.form.get(\"symbol\"):\n return apology(\"must provide username\", 400)\n # Ensures shares was submitted, otherwise return apology\n if not request.form.get(\"shares\"):\n return apology(\"must provide username\", 400)\n # The symbol user selected\n symbolselected = 
request.form.get(\"symbol\")\n # The amount of shares of the stock user inputed\n amtshares = db.execute(\"SELECT SUM(shares), symbol FROM portfolio WHERE userid = :userid GROUP BY :symbol\",\n userid=session[\"user_id\"], symbol=symbolselected)\n # Get the int version of how many shares person currently has\n amtshares = int(amtshares[0][\"SUM(shares)\"])\n\n # Amount of shares user wants to sell (it's negative because it reduces amount of shares user has for the stock)\n sharesinputed = -int((request.form.get(\"shares\")))\n # If user does not have enough stock to sell with inputed amount of shares, return apology\n if (amtshares + sharesinputed) < 0:\n return apology(\"You do not have enough shares\", 400)\n\n # Sets quote to the information about symbol inputed by user\n quote = lookup(request.form.get(\"symbol\"))\n # Ensures symbol is a valid symbol that has a quote\n if not quote:\n return apology(\"Symbol Invalid\", 400)\n # Amount of money stock will sell for\n value = quote[\"price\"]\n # Name of stock\n name = quote[\"name\"]\n # Total amount of money needed to buy the amount and type of stock user has inputed\n total = (value * sharesinputed)\n\n # Inserts sell transaction record into portfolio\n db.execute(\"INSERT INTO portfolio (userid, symbol, price, shares, TOTAL, transacted, name) VALUES(:userid, :symbol, :price, :shares, :TOTAL, :transacted, :name)\",\n userid=session[\"user_id\"], symbol=symbolselected, price=value, shares=sharesinputed, TOTAL = total, transacted=datetime.datetime.now(), name=name)\n\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # Final money count after adding value of stock (subtraction is used since total is negative, and we are adding sales value to cash)\n finalcashamount = money - total\n # Updates cash for user\n db.execute(\"UPDATE users SET cash = :finalcashamount WHERE id=:userid\",\n finalcashamount=finalcashamount, userid=session[\"user_id\"])\n # Redirects user to index page\n return redirect(\"/\")\n # If user is accessing sell page\n else:\n # List of symbols (not repeating)\n symbols = db.execute(\"SELECT symbol FROM portfolio WHERE userid = :userid GROUP BY symbol\", userid=session[\"user_id\"])\n\n # Returns sell.html with different types of symbols\n return render_template(\"sell.html\", symbols=symbols)", "def sell():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Obtain the symbol and shares that the user inputted\n stock = request.form.get(\"symbol\")\n sold = request.form.get(\"shares\")\n\n # Compute the number of shares in the account\n shares = db.execute(\"SELECT shares FROM summary WHERE id = :username AND symbol= :symbol\", username=session[\"user_id\"], symbol=stock)[0][\"shares\"]\n update = int(shares) - int(sold)\n\n # Ensure stock validity\n if stock == \"\":\n return apology(\"must select a stock\", 403)\n elif int(shares) == 0:\n return apology(\"stock not owned\", 403)\n\n # Ensure an appropriate amount of shares is requested\n if int(sold) < 0:\n return apology(\"invalid stock shares\", 403)\n elif int(shares) < int(sold):\n return apology(\"not enough shares owned\", 403)\n\n # Insert updated information into database\n db.execute(\"INSERT INTO purchase (id, symbol, shares, price, created_at) VALUES(:id,:symbol,:shares,:value, datetime('now'))\", 
id=session[\"user_id\"], symbol=stock, shares=\"-\"+sold, value=lookup(stock)[\"price\"])\n db.execute(\"UPDATE summary SET shares= :value WHERE (id = :username AND symbol= :symbol)\", value=str(update), username = session[\"user_id\"], symbol=stock)\n\n # Update the amount of cash in account\n cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])[0][\"cash\"]\n db.execute(\"UPDATE users SET cash = :new\", new = cash + (int(sold) * lookup(stock)[\"price\"]) )\n\n # Redirect users to login page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n symbols = []\n\n stocks = db.execute(\"SELECT * FROM summary WHERE id = :username\", username = session[\"user_id\"])\n\n # Create a list of stocks that the user owns and can sell\n for item in stocks:\n symbol = item[\"symbol\"]\n symbols.append(symbol)\n\n return render_template(\"sell.html\", symbols = symbols)", "def sell(self, bar, volume):\n self.place(Order(symbol=bar.symbol,\n volume=volume,\n price=bar.close,\n transaction=TransactionType.SELL,\n timestamp=bar.timestamp))", "async def price(self, ctx, name):\n\t\tname = name.upper()\n\t\ttry:\n\t\t\tstock_data = await self._get_stock_data([name])\n\t\texcept ValueError as e:\n\t\t\treturn await ctx.send(e)\n\t\tif name not in stock_data:\n\t\t\tawait ctx.send(f'I couldn\\'t find any data for the stock {name}. Please try another stock.')\n\t\t\treturn\n\t\tprice = stock_data[name]['price']\n\t\treal = str(price)\n\t\treal = ('0' * (3 - max(len(real), 0))) + real\n\t\treal = '$' + real[:-2] + '.' + real[-2:]\n\t\tcurrency = await bank.get_currency_name(ctx.guild)\n\t\tawait ctx.send(f'**{name}:** {price} {currency} per share ({real}).')", "def sell(self):\n #TODO\n #hint: use the raise method to create an exception.\n if self.quantity < 1:\n raise SoldOutOfStockError(self.name)\n else:\n return 1\n # item getters", "def sell():\n if request.method=='POST':\n #parameter is not filled\n if not request.form.get(\"shares\"):\n return apology(\"Please enter how much u want to sell\",400)\n #check if shares(amount) that are going to be sell less than owner's share.\n sell=request.form.get(\"symbol\")\n shares=request.form.get(\"shares\")\n amount=db.execute(\"SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions\",session[\"user_id\"],sell)\n if amount[0][\"amount\"]<int(shares):\n return apology(\"You dont own that much shares\",400)\n\n #record sell and add cash amount\n quote=lookup(sell)\n price=quote[\"price\"]\n total=int(price)*int(shares)\n\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\",session[\"user_id\"],(int(shares)*-1),quote[\"symbol\"],price)\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n\n return render_template(\"sell.html\",rows=rows)", "def sell(self, symbol: str=None, quantity: int=0, in_force: str='gtc', extended: bool=False):\n return self.trader.sell(symbol, quantity, in_force, extended)", "def sell():\n rows = db.execute(\"SELECT stock_id, shares, stocks.symbol FROM portfolio JOIN stocks ON portfolio.stock_id = stocks.id WHERE user_id==:user_id\", user_id=session[\"user_id\"])\n if request.method==\"GET\":\n return render_template(\"sell.html\", rows=rows)\n else:\n symbol = request.form.get(\"symbol\")\n if symbol==\"None\":\n return apology(\"You must select a symbol\")\n # shares sold will be stored in history table with negative value\n shares = int(request.form.get(\"shares\"))*(-1)\n if abs(shares) > rows[0][\"shares\"]:\n return apology(\"You don't own enough shares\")\n # run lookup function\n dict_4 = lookup(symbol)\n price = dict_4[\"price\"]\n # Insert new transaction in 'history' table\n db.execute(\"INSERT INTO history(user_id, stock_id, price, shares, buy) VALUES(:user_id, :stock_id, :price, :shares, :buy)\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], price=price, shares=shares, buy=0)\n # UPDATE shares in 'portfolio' table\n new_shares = (rows[0][\"shares\"])+shares\n db.execute(\"UPDATE portfolio SET shares==:shares WHERE user_id==:user_id and stock_id==:stock_id\", user_id=session[\"user_id\"], stock_id=rows[0][\"stock_id\"], shares=new_shares)\n # Update cash in 'users' table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n new_cash = row_cash[0][\"cash\"]-(price*shares)\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # message to be retrieved in portfolio.html when user sells stock\n flash('Sold!')\n return redirect(\"/\")", "def sellStock(self, barcode):\n # TODO\n # hint: Look through the list of items\n # and call the 'sell' method of the relevant item\n # return an error if the product isn't found\n # No. 
7\n invalid_barcode = 0\n for item in self.stocklist:\n if barcode == StockItem.getBarcode(item):\n invalid_barcode = 1\n if StockItem.sell(item) == 1:\n # StockItem.setQuantity(StockItem, 0) find away of reducing the stock quantity\n newQty = StockItem.getQuantity(item) - 1 # We reduce stock by one per item solid\n StockItem.setQuantity(item, newQty)\n print(\"Sold: Successfully: Qty remaining: \", StockItem.getQuantity(item))\n else:\n raise SoldOutOfStockError()\n if invalid_barcode == 0:\n raise ItemNotFoundError(barcode)", "def sell_bike(self, i):\n if i < len(self.inventory):\n self.sold.append(self.inventory[i])\n else:\n print \"That bike is not in stock\"", "def sell():\n\n user = session[\"user_id\"]\n\n # If GET just view\n if request.method == \"GET\":\n # view transactions\n rows = db.execute(\"SELECT symbol, amount FROM stocks WHERE user_id = :user\", user=user)\n\n # Create dictionary for stocks data owned\n stocks = {}\n for row in rows:\n stocks[row['symbol']] = row['amount']\n\n return render_template(\"sell.html\", stocks=stocks)\n\n # I case of POST\n amount=int(request.form.get(\"amount\"))\n symbol=request.form.get(\"symbol\")\n price=lookup(symbol)[\"price\"]\n value=round(price * float(amount))\n\n # Update stocks table\n stocks_before = db.execute(\"SELECT amount FROM stocks WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user)[0]['amount']\n stocks_after = stocks_before - amount\n\n # not enough\n if stocks_after < 0:\n return render_template(\"sell.html\", error=True, message=\"You can't sell more than you have\")\n\n # delete stock\n elif stocks_after == 0:\n db.execute(\"DELETE FROM stocks WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user)\n\n # or update it\n else:\n db.execute(\"UPDATE stocks SET amount = :amount WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user, amount=stocks_after)\n\n # update cash and history\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n cash_after = cash + price * float(amount)\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :user\", cash=cash_after, user=user)\n db.execute(\"INSERT INTO transactions(user_id, symbol, amount, value) VALUES (:user, :symbol, :amount, :value)\",\n user=user, symbol=symbol, amount=-amount, value=value)\n\n # If success redirect\n return redirect(\"/\")", "def ramp_down(self) -> None:\n for stock in self.stocks:\n if stock.are_any_shares_owned():\n self.cash_balance = stock.sell(-1, self.cash_balance, self.buy_budget)", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def market_sell(self, 
order_id, quantity):\n Library.functions.market_sell(self._book, order_id, quantity)", "def sell():\n\n # id user session\n user_id = session[\"user_id\"]\n\n # User reached route via GET\n if request.method == \"GET\":\n\n # Shares owned by the user\n current_stocks = db.execute(\n \"SELECT symbol FROM purchases WHERE user_id = :id GROUP BY symbol HAVING SUM(shares) > 0\", id=user_id)\n return render_template(\"sell.html\", current_stocks=current_stocks)\n\n # User reached route via POST\n else:\n\n # Assign inputs to variables\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n # Choose symbol\n if not symbol:\n return apology(\"choose symbol\")\n\n # Ensure user entered a positive integer for number of shares\n if int(shares) <= 0:\n return apology(\"number of shares must be a positive integer\")\n\n # Query database for user's purchases\n stock = db.execute(\"SELECT SUM(shares) as shares FROM purchases WHERE user_id = :id AND symbol = :symbol\",\n id=user_id, symbol=symbol)\n\n # Ensure user has enough shares for selected symbol\n if stock[0][\"shares\"] < int(shares):\n return apology(\"not enough shares\")\n\n # Query database to insert transaction\n db.execute(\"INSERT INTO purchases (user_id, symbol, name, shares, price, data) VALUES (:id, :symbol, :name, :shares, :price, :data)\",\n id=user_id,\n symbol=symbol,\n name=lookup(symbol)[\"name\"],\n shares=int(shares) * (-1),\n price=lookup(symbol)[\"price\"],\n data=datetime.now())\n\n # Calculate total price based on number of shares and stock's current price\n total_price = lookup(symbol)[\"price\"] * int(shares)\n\n # How much cash the user currently\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=user_id)[0][\"cash\"]\n\n # Query database to update user's cash balance\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\",\n id=user_id,\n cash=cash + total_price)\n\n # Redirect user to homepage\n return redirect(\"/\")", "def do_sell():\n order_size = calculate_sell_order_size()\n if order_size is None:\n return None\n i = 1\n while i <= CONF.trade_trials:\n sell_price = calculate_sell_price(get_current_price())\n order = create_sell_order(sell_price, order_size)\n if order is None:\n LOG.error(\"Could not create sell order over %s\", order_size)\n return None\n write_action('-SELL')\n order_status = poll_order_status(order.id, 10)\n if order_status == 'open':\n cancel_order(order)\n i += 1\n daily_report()\n else:\n return order\n write_action('-SELL')\n return create_market_sell_order(order_size)", "def sell():\n\n # User reached route via GET (as by submitting a form via GET)\n if request.method == \"GET\":\n\n # Select user symbol from total\n symbol_sel = db.execute(\"SELECT symbol FROM total WHERE userID = :userID\", userID=session[\"user_id\"])\n return render_template(\"sell.html\", symbol_sel=symbol_sel, sslen=len(symbol_sel) )\n else:\n # Get symbol and number through input form\n symbol = request.form.get(\"symbol\")\n number = request.form.get(\"shares\")\n\n # Ensure sell symbol was submitted\n if not symbol:\n return apology(\"must provide symbol\", 400)\n\n # Ensure sell number was submitted\n if not number:\n return apology(\"must provide number\", 400)\n\n # Check if request.form.get(\"symbol\") in lookup() table\n symbol = lookup(symbol)\n if not symbol:\n return apology(\"must provide right symbol\", 400)\n else:\n\n # Get name, price, symbol from lookup function\n name = symbol.get(\"name\")\n price = symbol.get(\"price\")\n symbol = 
symbol.get(\"symbol\")\n\n # SELECT symbol in TABLE total\n symbolIn = db.execute(\"SELECT symbol FROM total WHERE userID = :userID and symbol = :symbol\",\n userID=session[\"user_id\"], symbol=symbol)\n\n # Ensure user have this symbol\n if not symbolIn:\n return apology(\"you don't have this symbol\", 400)\n\n # Ensure sell number is a number\n nlen = len(number)\n for i in range(nlen) :\n if number[i].isdigit() != True :\n return apology(\"sell number need to be a number\", 400)\n\n number = int(number)\n\n # Check positive number\n if number > 0:\n\n # SELECT sharesTotal in TABLE total\n symbolNum = db.execute(\"SELECT sharesTotal FROM total WHERE userID = :userID and symbol = :symbol\",\n userID=session[\"user_id\"], symbol=symbol)\n\n # Ensure user have sharesTotal\n if symbolNum[0][\"sharesTotal\"] < number:\n return apology(\"you don't have this number\", 400)\n\n # Selsct cash from user TABLE\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userID\",\n userID=session[\"user_id\"])\n\n # Count total\n totalGet = price*number\n cash = cash[0][\"cash\"] + totalGet\n\n # Update csah in user\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userID\", cash=cash, userID=session[\"user_id\"])\n\n # Check sell time\n now = datetime.now(timezone('Asia/Shanghai'))\n\n # INSERT sell TABLE date, shares, price, name, symbol, totalGet\n db.execute(\"INSERT INTO sell (date, symbol, name, price, shares, totalGet, userID) VALUES (:date, :symbol, :name, :price, :shares, :totalGet, :userID)\",date=now, symbol=symbol, name=name, price=price, shares=number, totalGet=totalGet, userID=session[\"user_id\"])\n\n # Add to buy-sell table\n db.execute(\"INSERT INTO bs (symbol, price, shares, date, userID) VALUES (:symbol, :price, :shares, :date, :userID)\", symbol=symbol, price=usd(price), shares=-number, date=now, userID=session[\"user_id\"])\n\n # SELECT costmoneyTotal FROM total\n costTot = db.execute(\"SELECT costmoneyTotal FROM total WHERE userID = :userID and name = :name\",\n userID=session[\"user_id\"], name = name)\n\n # Change costmoneyTotal FROM total\n costTotEnd = costTot[0][\"costmoneyTotal\"]-totalGet\n\n # Update sharesTotal, costmoneyTotal total did by order\n db.execute(\"UPDATE total SET sharesTotal = :sharesTotal, costmoneyTotal = :costmoneyTotal WHERE userID = :userID and name = :name\", sharesTotal=symbolNum[0][\"sharesTotal\"]-number, costmoneyTotal=costTotEnd, userID=session[\"user_id\"], name=name)\n\n # Falsh massage\n flash('sell')\n\n # render selled template\n return render_template(\"selled.html\",symbol=symbol, name=name, price=price, number=symbolNum[0][\"sharesTotal\"]-number, totalGet=usd(totalGet), costTotEnd=usd(cash))\n else:\n return apology(\"positive number\", 400)", "def sell():\n if request.method == \"POST\":\n\n #test for selection of stocks\n if request.form.get(\"symbol\") == \"\" or request.form.get(\"shares\") == \"\":\n return apology(\"Please fill in all fields\")\n\n #test for positive integer\n if str.isdigit(request.form.get(\"shares\")) == False:\n return apology(\"Please select a positive number of shares\")\n\n # does the user have enough shares of that stock\n user_stock = request.form.get(\"symbol\")\n user_number = int(request.form.get(\"shares\"))\n owned = db.execute(\"SELECT SUM(number) FROM portfolio WHERE userid=:id AND stock=:stock\", stock = user_stock, id=session[\"user_id\"])\n owned = int(owned[0]['SUM(number)'])\n if user_number > owned:\n return apology(\"You don't have enough shares\")\n\n #in the portfolio table, add a 
negative to the number field of the purchased stock\n #in the cash table, lookup the current price and add the cash to the user's cash balanace\n else:\n pay = lookup(request.form.get(\"symbol\"))\n user_number = int(request.form.get(\"shares\"))\n db.execute(\"UPDATE users SET cash=cash+:total WHERE id=:userid\", total=(pay['price'] * user_number), userid=session[\"user_id\"])\n\n user_number = int(request.form.get(\"shares\")) * -1\n db.execute(\"INSERT INTO portfolio (stock, number, price, trans_price, userid) VALUES (:stock, :number, :price, :trans_price, :userid)\", stock=user_stock, number=user_number, price=(pay['price'] * user_number), trans_price=usd(pay['price']), userid=session[\"user_id\"])\n\n user_id=session[\"user_id\"]\n return redirect(url_for('index'))\n\n if request.method == \"GET\":\n #get stocks from portfolio and return to html form\n stocks = db.execute(\"SELECT stock FROM portfolio WHERE userid=:id GROUP BY stock\", id=session[\"user_id\"])\n return render_template(\"sell.html\", stocks=stocks)", "def buy(self, stock, amount):\n self.orders[stock] += amount", "def transact_shares(self, action, quantity, price, commission, bid=None, ask=None):\n if bid is None: \n bid = price\n if ask is None:\n ask = price\n\n if action is None:\n return\n\n self.total_commission += commission\n\n # Adjust total bought and sold\n if action == \"BOT\":\n self.avg_bot = (self.avg_bot * self.buys + price * quantity) / (self.buys + quantity)\n\n if self.net < 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (self.avg_price - price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n # Increasing long position\n self.avg_price = (self.avg_price * self.net + price * quantity + commission) / (self.net + quantity)\n self.buys += quantity\n self.total_bot = self.buys * self.avg_bot\n\n # action == \"SLD\"\n else:\n self.avg_sld = (self.avg_sld * self.sells + price * quantity) / (self.sells + quantity)\n\n if self.net > 0:\n self.realised_pnl += min(quantity, abs(self.net)) * (price - self.avg_price) - commission # Adjust realised PNL\n commission = 0 # assume commission is all in realised_pnl\n\n self.avg_price = (self.avg_price * self.net - price * quantity - commission) / (self.net - quantity)\n self.sells += quantity\n self.total_sld = self.sells * self.avg_sld\n\n # Adjust net values, including commissions\n self.net = self.buys - self.sells\n self.net_total = self.total_sld - self.total_bot\n self.net_incl_comm = self.net_total - self.total_commission\n self.cost_basis = self.net * self.avg_price\n\n self.update_market_value(bid, ask)", "def buy(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price += np.abs(slip_factor)\n\n self.trade_manager.buy(from_symbol, to_symbol, price, amount, date)", "def sell():\n \n #Create list of stocks in users portfolio\n stocks = [s for s in portfolio() if 'symbol' in s.keys()]\n\n #User arrived via GET\n if request.method == 'GET':\n #Return sell.html\n return render_template('sell.html', stocks=stocks)\n\n #User arrived via POST\n else:\n if request.method == 'POST':\n\n #Set variable for selected stock\n stock = [s for s in stocks if s['symbol'] == request.form.get('symbol')][0]\n\n #Make sure user has enough stock to sell\n if int(request.form.get('shares')) > stock['shares']:\n 
return apology('too many shares', 400)\n\n else:\n #See what stock is currently selling for and store in variable\n price = lookup(stock['symbol'])['price']\n\n #Add transaction to history\n trans = Transactions(symbol=stock['symbol'].upper(), shares=(int(request.form.get('shares')) * -1), \n price=price, transacted=datetime.now(), owner=session['user_id'])\n db.session.add(trans)\n db.session.commit()\n\n #update user's cash\n Users.query.filter_by(id=session['user_id']).first().cash += (price * int(request.form.get('shares')))\n db.session.commit()\n\n return redirect('/')", "async def _submit_trade_sell(self, trade: Dict[str, Any]) -> str:\n\n pair = trade['pair']\n filled_quantity = trade['quantity'] - trade['remaining']\n base_mult = await self.market.get_pair_base_mult(config['trade_base'], pair)\n\n if filled_quantity > 0.0:\n min_size = self.market.min_trade_size / base_mult\n if min_size < self.market.min_trade_sizes[pair]:\n min_size = self.market.min_trade_sizes[pair]\n\n min_value = min_size / filled_quantity\n order_id = await self.api.sell_limit(pair, filled_quantity, min_value)\n\n if order_id is None:\n quote = pair.split('-')[1]\n reserved = config['remit_reserved'][quote] if quote in config['remit_reserved'] else 0.0\n balance = await self.api.get_balance(quote)\n\n if balance is None:\n self.log.error(\"Could not get available balance for {}!\", quote)\n return None\n\n balance -= reserved\n\n if balance >= min_size:\n min_value = min_size / balance\n self.log.warning(\"{} re-trying sell with available balance {}.\", pair, balance)\n order_id = await self.api.sell_limit(pair, balance, min_value)\n\n if order_id is None:\n self.log.error(\"{} could not submit market sell for trade {}!\", pair, trade['order_id'])\n\n else:\n self.log.info(\"{} submitted market sell for trade {}.\", pair, trade['order_id'])\n\n return order_id\n\n self.log.warning(\"{} has no filled volume on trade {} for sell.\", pair, trade['order_id'])\n return None", "def sell():\n\n # User reached route via GET\n if request.method == \"GET\":\n\n # Display quote\n return render_template(\"sell.html\")\n\n # User reached route via POST\n else:\n\n sell_symbol = request.form.get(\"sell_symbol\").upper()\n sell_amount = float(request.form.get(\"sell_amount\"))\n\n temp_symbol = []\n\n if sell_amount < 1:\n return apology(\"You can only sell a positive amount. 
To buy, please go to buy\", 403)\n\n user_id = session[\"user_id\"]\n\n # Check if client owns stock\n symbol = db.execute(\"SELECT DISTINCT symbol FROM stocks WHERE user_id = :user_id\", user_id = user_id)\n amount = db.execute(\"SELECT SUM(amount) FROM stocks WHERE symbol = :symbol\", symbol = sell_symbol)\n\n for i in range(len(symbol)):\n temp_symbol.append(symbol[i][\"symbol\"])\n\n if sell_symbol not in temp_symbol:\n return apology(\"Sorry, you don't own this stock\", 403)\n\n if amount[0][\"SUM(amount)\"] < sell_amount:\n return apology(\"Sorry, you don't have enough stocks\", 403)\n\n stock_price = float(lookup(sell_symbol)[\"price\"])\n\n # Returns a list\n cash_list = db.execute(\"SELECT cash FROM users WHERE id= :user_id\", user_id = user_id)\n cash = float(cash_list[0]['cash'])\n\n current_cash = cash + (sell_amount * stock_price)\n\n db.execute(\"INSERT INTO stocks (symbol, price, amount, user_id) VALUES (:symbol, :price, :amount, :user_id)\", {\"symbol\": sell_symbol, \"price\": stock_price, \"amount\": (-1 * sell_amount), \"user_id\": user_id})\n\n db.execute(\"UPDATE users SET cash = :current_cash WHERE id = :user_id\", {\"current_cash\": current_cash, \"user_id\": user_id})\n\n # Redirect user to home page\n return redirect(\"/\")", "def reverse_buy(self, amount):\n trade_amount = 0\n precision = pow(10, self.pair.get_base_token().get_decimals() - self.pair.get_quote_token().get_decimals())\n for i in range(len(self.book[Trade.WAY_SELL])):\n offer = self.book[Trade.WAY_SELL][i]\n amount_quote = offer.get_quote_amount() # GAS\n amount_base = offer.get_base_amount() # NEO\n price = offer.get_price()\n\n if amount_quote >= amount:\n if self.pair.get_exchange().get_fee_token():\n trade_amount = trade_amount + amount*price * precision\n else:\n trade_amount = trade_amount + amount*price * precision / (1 - self.pair.get_exchange().get_fees())\n return int(trade_amount)\n\n '''\n Is the offered amount less than needed, you can only buy the offered amount and continue\n '''\n trade_amount = trade_amount + amount_base\n amount = amount - amount_quote\n\n '''\n Not enough volume or amount to high\n '''\n raise KeyError(\"Not enough offers in orderbook. 
Low volume or amount to high.\")", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n \n # Creates dict\n symbol_info = lookup(request.form.get(\"symbol\"))\n \n # Checks that symbol exists\n if symbol_info == None:\n return apology(\"Invalid Symbol\", 403)\n \n # Ensure number of shares was submitted\n if not request.form.get(\"shares\"):\n return apology(\"must provide number of shares\", 403)\n \n # Ensure shares is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n return apology(\"invalid value\", 403)\n \n # Ensure there's enough money to buy share\n user_money = db.execute(\"SELECT cash FROM users WHERE id=:userid\", userid=session[\"user_id\"])\n cash = float(user_money[0][\"cash\"])\n if cash < float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")):\n return apology(\"Not enough money\", 403)\n \n # Update user\n updated_money = cash - (float(symbol_info[\"price\"]) * float(request.form.get(\"shares\")))\n db.execute(\"UPDATE users SET cash = :updated WHERE id=:usid\", updated=updated_money, usid=session[\"user_id\"])\n \n # Update shares table\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id = :usid\", usid=session[\"user_id\"])\n exist = 0\n for i in range(len(symbol_dicts)):\n if symbol_dicts[i][\"share\"].upper() == request.form.get(\"symbol\").upper():\n exist = 1\n break\n \n if exist == 0:\n db.execute(\"INSERT INTO shares (user_id, share, share_count) VALUES (:usid, :symbol, :count)\", usid=session[\"user_id\"], symbol=request.form.get(\"symbol\").upper(), count=int(request.form.get(\"shares\")))\n else:\n db.execute(\"UPDATE shares SET share_count = share_count + :count WHERE share = :symbol AND user_id = :usid\", count=int(request.form.get(\"shares\")), symbol=request.form.get(\"symbol\").upper(), usid=session[\"user_id\"])\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol_info[\"symbol\"], shares=request.form.get(\"shares\"), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(symbol_info[\"price\"]))\n \n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"buy.html\")", "async def trade(self, ctx, sell_amount : float, sell_symbol, \n buy_amount : float, buy_symbol, date=None):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n portfolio.Sell(sell_amount, sell_symbol)\n portfolio.Buy(buy_amount, buy_symbol)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' 
% \n (user, portfolio.Value()))\n portfolio.Save()", "def sell_limit(self, market, quantity, rate):\n return self.api_query('Trade', {'type':'sell', 'pair': market, 'amount': quantity, 'rate':'%.8f'%rate})", "def volume_sell(self, price=None):\n if price is None:\n return Library.functions.volume_sell(self._book)\n return Library.functions.volume_sell_price(self._book, price)", "def sell(self):\n self.status = \"sold\"\n return self", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n \n # calculate total price for the sell request\n company_name = lookup(request.form.get(\"symbol\"))[\"name\"]\n curr_price = lookup(request.form.get(\"symbol\"))[\"price\"]\n total_price = curr_price * -int(request.form.get(\"shares\"))\n\n # db.execute returns list of dicts (one dict, actually), where key == \"cash\" and value - cash left in user's account\n cash_left = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])[0][\"cash\"]\n\n # calculate if user has enough shares for operation to be made\n shares = db.execute(\"SELECT SUM(Shares) FROM portfolio WHERE id = :id AND Company = :company GROUP BY Company\", id = session[\"user_id\"], company=company_name)\n\n if shares[0][\"SUM(Shares)\"] < int(request.form.get(\"shares\")):\n return apology(\"you do not have enough shares for this operation to be completed\")\n\n # add operation to users portfolio\n exe = db.execute(\"INSERT INTO portfolio (id, Symbol, Company, Shares, Price, Total) VALUES(:id, :Symbol, :Company, :Shares, :Price, :Total)\",\n id=session[\"user_id\"], Symbol=request.form.get(\"symbol\").upper(), Company=lookup(request.form.get(\"symbol\"))[\"name\"],\n Shares=-int(request.form.get(\"shares\")), Price=curr_price, Total=total_price)\n\n # update cash\n db.execute('UPDATE users SET cash = :cash WHERE id = :id', cash=cash_left - total_price, id=session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n # extract list of companies user has in portfolio\n companies = db.execute(\"SELECT Symbol FROM portfolio WHERE id = :id GROUP BY Symbol\", id = session[\"user_id\"])\n\n return render_template(\"sell.html\", companies = companies)", "def sell_etf(self, etf_name, sell_date, sell_price, commissions, n_shares='all'): \n assert etf_name in self.etfs.keys(), 'ETF not in portfolio'\n assert isinstance(sell_date, date), 'Sell_date parameter needs to be a datetime.date instance'\n assert isinstance(sell_price, float), 'Sell_price must be float'\n assert isinstance(commissions, float), 'Commissions must be float'\n assert n_shares == 'all' or isinstance(n_shares, int), 'N_shares must be int'\n if n_shares == 'all':\n self.etfs[etf_name].sell(sell_date, sell_price, commissions)\n new_file = pd.read_csv(self.infoFile, index_col='Name')\n new_file.loc[etf_name, 'sell_date'] = sell_date\n new_file.loc[etf_name, 'sell_price'] = sell_price\n new_file.loc[etf_name, 'sell_commissions'] = commissions\n new_file.to_csv(self.infoFile)\n else:\n new_file = pd.read_csv(self.infoFile, index_col='Name')\n assert 0 < n_shares <= new_file.loc[etf_name, 'n_shares'], f'Number of shares must be between 0 and {new_file.loc[etf_name, \"n_shares\"]}'\n new_file.loc[etf_name, 'n_shares'] -= n_shares # Take out the sold shares\n prevEtf = self.get_etf_by_name(etf_name)\n newName = self.find_next_name(etf_name)\n newEtf = ETF(newName, prevEtf.buy_date, n_shares, prevEtf.buy_price, prevEtf.commissions[0], sell_date, sell_price, prevEtf.info, commissions)\n 
new_file.to_csv(self.infoFile)\n self.add_etf(newEtf)\n self.refresh()", "def sellOutAllStock(self):\n # GET ALL POSITIONS FOR ACCOUNT\n open_positions = self.open_positions.find({\"Trader\": self.user[\"Name\"], \"Asset_Type\" : self.asset_type, \"Account_ID\" : self.account_id})\n\n for position in open_positions:\n\n trade_data = {\n \"Symbol\": position[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": position[\"Aggregation\"],\n \"Strategy\": position[\"Strategy\"],\n \"Asset_Type\": position[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n queued = self.queue.find_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": position[\"Asset_Type\"], \"Account_ID\" : self.account_id})\n\n if not queued:\n\n self.placeOrder(trade_data, position, orderType=\"MARKET\")", "def trading_alg(self,table_name = None, buy_now = False, strategy_name = \"sma9\"):\n \n self.bs.buyed_stocks = 0\n self.bs.money = self.bs.startCredit\n spy_stocks = self.load_data(table_name = table_name, symbols = [\"SPY\"])\n spy_stocks = FinI.add_indicators(spy_stocks)\n \n if self.symbols:\n symbols = self.symbols\n else:\n symbols = self.db.get_symbols()\n\n # symbols = [\"INTC\",\"BYND\",\"ZM\",\"NKE\",\"HIMX\",\"JKS\",\"ENPH\",\"DUK\",\"GE\",\"DIS\",\"LEVI\",\"NVAX\",\"SLCA\",\"GPS\"]\n \n for symbol in symbols:\n print(\"symbol: \" + str(symbol))\n \n sub_data = self.load_data(table_name = table_name, symbols = symbol)\n if len(sub_data) < 1:\n break\n\n self.bt_stocks = FinI.add_indicators(sub_data)\n self.bt_stocks = FinI.add_fib(self.bt_stocks)\n # print(self.bt_stocks)\n print(self.bt_stocks[\"sma30\"])\n print(\"calculating percent change:\" + str(symbol))\n # sub_data = self.stocks.loc[self.stocks.sym ==symbol[0]].sort_values(by='index')\n \n self.symbols = symbol[0]\n \n # self.prev_stock = sub_data.iloc[0]\n # self.bt_stocks.iloc[0] = sub_data.iloc[0]\n\n # self.sell_marks = self.sell_marks.iloc[0:0]\n # self.buy_marks = self.buy_marks.iloc[0:0]\n self.bs.transactions = 0\n self.bs.profit_perc = 0\n \n # trend_indicator = \n # TODO mechanism for select strategies\n # self.ts_boll(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks)\n self.ts_eval(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_logic = strategy_name)\n\n # call the method with passed and assembled name\n # method = getattr(self, 'ts_' + strategy_name)\n # method(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_name = strategy_name)", "def sell():\n\n if request.method == \"GET\":\n\n #Query for all the stocks in posession.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY name\", id=session[\"user_id\"])\n\n return render_template(\"sell.html\", ports=ports)\n if request.method == \"POST\":\n #Access the form data\n symbol = request.form.get(\"symbol\")\n\n #Check if the shares was an integer\n try:\n shares = int(request.form.get(\"shares\"))\n except:\n return apology (\"Please enter a whole number\", 400)\n\n #Query for the total quantity of that stock in posession\n get_quantity = db.execute(\"SELECT quantity FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n #Convert the quantity dict to int\n get_quantity_int = int(get_quantity[0]['quantity'])\n\n #Check if the user input a positive number.\n if shares < 0:\n return apology (\"Please enter a 
positive value\", 403)\n\n #Get the current date and time\n now = datetime.now()\n\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n if shares < 0:\n return apology (\"Please enter a positive value\", 403)\n #Lookup the stock symbol data (price, symbol, company name)\n if shares > get_quantity_int:\n return apology (\"Selling more than you own?\", 400)\n stock = lookup(symbol)\n\n stock_price = stock['price']\n\n #Created a new table using CREATE TABLE 'portfolio' ('user' text, 'quantity' integer, 'price' numeric(15, 2), 'symbol' text)\n\n #Get the total cash value of the user from the database\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float\n check_cash = float(get_cash[0]['cash'])\n\n if not stock:\n return apology (\"Please enter a valid stock\", 403)\n\n #Compute the total amount of the shares sold (One company stock only)\n total = stock_price * float(shares)\n\n #Update the total amount of cash in hand by adding the sold stocks.\n db.execute(\"UPDATE users SET cash = cash + :total WHERE id = :id\", id=session[\"user_id\"], total=total)\n\n #Check if the total quantity of shares is equal to the quantity the user is trying to sell.\n #Add the stock in the history table\n history = db.execute(\"INSERT INTO history (symbol, quantity, price, transacted, id) VALUES (?, ?, ?, ?, ?)\", symbol, int(shares) * -1, float(stock_price), date_time, session[\"user_id\"] )\n\n #If it's equal then delete the stock in the portfolio. #Else, Update the quantity of that stock in the portfolio.\n if shares == get_quantity_int:\n db.execute(\"DELETE FROM portfolio WHERE id = :id AND symbol = :symbol\", id=session['user_id'], symbol=symbol)\n flash('You successfully sold the stock!')\n else:\n db.execute(\"UPDATE portfolio SET quantity = quantity - :shares, total = total -:total WHERE id = :id AND symbol = :symbol\", id=session[\"user_id\"], symbol=symbol, shares=shares, total=total)\n flash('You successfully sold the stock!')\n return redirect (url_for('index'))", "def sell():\n\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n rows = db.execute(\"Select Stock, sum(Num) as Number from portfolio where User = :User and Stock = :symbol group by Stock\", User = session.get(\"user_id\"), symbol = symbol)\n num = rows[0][\"Number\"]\n num1 = int(request.form.get(\"number\"))\n # render apology if the user fails to select a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 403)\n\n # Ensure number of shares\n elif not request.form.get(\"number\"):\n return apology(\"must provide number\", 403)\n\n # Ensure if users owns the number of stocks\n elif num1 > num:\n return apology(\"not enough stock\", 403)\n\n #log sale as a negative quant of shares at the current slide\n\n\n stock = symbol\n\n price = float(lookup(stock)['price'])\n\n\n num = -num1\n result = db.execute(\"INSERT INTO portfolio (User, Stock, Price, Num) VALUES(:User, :Stock, :Price, :Num)\", User = session.get(\"user_id\"), Stock = stock, Price = price, Num = num)\n\n\n #update the user cash\n amount = round(num*price,2)\n result = db.execute(\"UPDATE users set cash = cash - :amount where id = :User \", User = session.get(\"user_id\"), amount = amount)\n\n\n# if not result:\n# return apology(\"username already exists\", 403)\n\n # Log user in\n # Query database for username\n# rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n\n\n # 
session[\"user_id\"] = rows[0][\"id\"]\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n\n rows = db.execute(\"Select Stock, sum(Num) as Number from portfolio where User = :User group by Stock\", User = session.get(\"user_id\"))\n stockss = rows\n stocksss = []\n for stock in stockss:\n symbol = str(stock[\"Stock\"])\n stocksss.append(symbol)\n\n return render_template(\"sell.html\", x = stocksss)\n\n\n\n\n # get current price for each group (ie AAPL) with help from lookup function (which remember, returns a dict)" ]
[ "0.76451164", "0.748746", "0.7260813", "0.71164954", "0.7057435", "0.70490545", "0.70268077", "0.69872165", "0.6972749", "0.6966809", "0.6915586", "0.68977857", "0.68829334", "0.6849716", "0.68191767", "0.68124473", "0.68022114", "0.6801836", "0.67799073", "0.67698354", "0.67686903", "0.67685384", "0.6763028", "0.6761169", "0.6750394", "0.6737747", "0.67146623", "0.66963017", "0.6695012", "0.66774476", "0.66640884", "0.66626096", "0.6654251", "0.66449285", "0.66422653", "0.66262025", "0.6608471", "0.65532064", "0.6548866", "0.6538862", "0.65250057", "0.65241355", "0.6516954", "0.65059006", "0.6501579", "0.6487432", "0.6485335", "0.64743656", "0.64702666", "0.6451987", "0.6428272", "0.6395518", "0.63914704", "0.63885033", "0.63833195", "0.6382582", "0.63755643", "0.63674843", "0.6364422", "0.6359301", "0.63532573", "0.63522965", "0.6348651", "0.634843", "0.6285282", "0.62740654", "0.6274011", "0.62734246", "0.62478936", "0.62463963", "0.62325263", "0.6216905", "0.61967844", "0.6180049", "0.61729735", "0.6158238", "0.61084294", "0.6108257", "0.6099662", "0.6077487", "0.6073024", "0.6070951", "0.60680014", "0.6059142", "0.6051766", "0.6027577", "0.60202867", "0.60192585", "0.60135007", "0.6010428", "0.59895015", "0.59863704", "0.5983354", "0.59814173", "0.59808", "0.5960544", "0.5959601", "0.5958356", "0.5951698", "0.5944001" ]
0.6277801
65
Clone a functional `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments
def _clone_functional_model(model, input_tensors=None): if not isinstance(model, Model): raise ValueError('Expected `model` argument ' 'to be a `Model` instance, got ', model) if isinstance(model, Sequential): raise ValueError('Expected `model` argument ' 'to be a functional `Model` instance, ' 'got a `Sequential` instance instead:', model) layer_map = {} # Cache for created layers. tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)} if input_tensors is None: # Create placeholders to build the model on top of. input_layers = [] input_tensors = [] for layer in model.input_layers: input_tensor = Input(batch_shape=layer.batch_input_shape, dtype=layer.dtype, sparse=layer.sparse, name=layer.name) input_tensors.append(input_tensor) # Cache newly created input layer. newly_created_input_layer = input_tensor._keras_history[0] layer_map[layer] = newly_created_input_layer for original_input_layer, cloned_input_layer in zip(model.input_layers, input_layers): layer_map[original_input_layer] = cloned_input_layer else: # Make sure that all input tensors come from a Keras layer. # If tensor comes from an input layer: cache the input layer. input_tensors = to_list(input_tensors) _input_tensors = [] for i, x in enumerate(input_tensors): if not K.is_keras_tensor(x): name = model.input_layers[i].name input_tensor = Input(tensor=x, name='input_wrapper_for_' + name) _input_tensors.append(input_tensor) # Cache newly created input layer. original_input_layer = x._keras_history[0] newly_created_input_layer = input_tensor._keras_history[0] layer_map[original_input_layer] = newly_created_input_layer else: _input_tensors.append(x) input_tensors = _input_tensors for x, y in zip(model.inputs, input_tensors): tensor_map[x] = (y, None) # tensor, mask # Iterated over every node in the reference model, in depth order. depth_keys = list(model._nodes_by_depth.keys()) depth_keys.sort(reverse=True) for depth in depth_keys: nodes = model._nodes_by_depth[depth] for node in nodes: # Recover the corresponding layer. layer = node.outbound_layer # Get or create layer. if layer not in layer_map: # Clone layer. new_layer = layer.__class__.from_config(layer.get_config()) layer_map[layer] = new_layer layer = new_layer else: # Reuse previously cloned layer. layer = layer_map[layer] # Don't call InputLayer multiple times. if isinstance(layer, InputLayer): continue # Gather inputs to call the new layer. reference_input_tensors = node.input_tensors reference_output_tensors = node.output_tensors # If all previous input tensors are available in tensor_map, # then call node.inbound_layer on them. computed_data = [] # List of tuples (input, mask). for x in reference_input_tensors: if x in tensor_map: computed_data.append(tensor_map[x]) if len(computed_data) == len(reference_input_tensors): # Call layer. 
if node.arguments: kwargs = node.arguments else: kwargs = {} if len(computed_data) == 1: computed_tensor, computed_mask = computed_data[0] if has_arg(layer.call, 'mask'): if 'mask' not in kwargs: kwargs['mask'] = computed_mask output_tensors = to_list( layer(computed_tensor, **kwargs)) output_masks = to_list( layer.compute_mask(computed_tensor, computed_mask)) computed_tensors = [computed_tensor] computed_masks = [computed_mask] else: computed_tensors = [x[0] for x in computed_data] computed_masks = [x[1] for x in computed_data] if has_arg(layer.call, 'mask'): if 'mask' not in kwargs: kwargs['mask'] = computed_masks output_tensors = to_list( layer(computed_tensors, **kwargs)) output_masks = to_list( layer.compute_mask(computed_tensors, computed_masks)) # Update tensor_map. for x, y, mask in zip(reference_output_tensors, output_tensors, output_masks): tensor_map[x] = (y, mask) # Check that we did compute the model outputs, # then instantiate a new model from inputs and outputs. output_tensors = [] for x in model.outputs: assert x in tensor_map, 'Could not compute output ' + str(x) tensor, _ = tensor_map[x] output_tensors.append(tensor) return Model(input_tensors, output_tensors, name=model.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone_model(model, input_tensors=None):\n if isinstance(model, Sequential):\n return _clone_sequential_model(model, input_tensors=input_tensors)\n else:\n return _clone_functional_model(model, input_tensors=input_tensors)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def clone(self):\n return _libsbml.Model_clone(self)", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def clone(self, camera = None, light = None):\r\n newModel = Model(file_string = \"__clone__\", x=self.unif[0], y=self.unif[1], z=self.unif[2],\r\n rx=self.unif[3], ry=self.unif[4], rz=self.unif[5], sx=self.unif[6], sy=self.unif[7], sz=self.unif[8],\r\n cx=self.unif[9], cy=self.unif[10], cz=self.unif[11])\r\n newModel.buf = self.buf\r\n newModel.vGroup = self.vGroup\r\n newModel.shader = self.shader\r\n newModel.textures = self.textures\r\n return newModel", "def _clone_sequential_model(model, input_tensors=None):\n if not isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a `Sequential` model instance, '\n 'but got:', model)\n\n def clone(layer):\n return layer.__class__.from_config(layer.get_config())\n\n layers = [clone(layer) for layer in model.layers]\n if input_tensors is None:\n return Sequential(layers=layers, name=model.name)\n else:\n if len(to_list(input_tensors)) != 1:\n raise ValueError('To clone a `Sequential` model, we expect '\n ' at most one tensor '\n 'as part of `input_tensors`.')\n x = to_list(input_tensors)[0]\n if K.is_keras_tensor(x):\n origin_layer = x._keras_history[0]\n if isinstance(origin_layer, InputLayer):\n return Sequential(layers=[origin_layer] + layers,\n name=model.name)\n else:\n raise ValueError('Cannot clone a `Sequential` model on top '\n 'of a tensor that comes from a Keras layer '\n 'other than an `InputLayer`. 
'\n 'Use the functional API instead.')\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + str(x.name))\n input_layer = input_tensor._keras_history[0]\n return Sequential(layers=[input_layer] + layers, name=model.name)", "def clone(self,\n from_model: entities.Model,\n model_name: str,\n dataset: entities.Dataset = None,\n configuration: dict = None,\n status=None,\n scope=None,\n project_id: str = None,\n labels: list = None,\n description: str = None,\n tags: list = None,\n train_filter: entities.Filters = None,\n validation_filter: entities.Filters = None,\n ) -> entities.Model:\n from_json = {\"name\": model_name,\n \"packageId\": from_model.package_id,\n \"configuration\": from_model.configuration,\n \"metadata\": from_model.metadata,\n \"outputType\": from_model.output_type,\n \"inputType\": from_model.input_type}\n if project_id is None:\n project_id = self.project.id\n from_json['projectId'] = project_id\n if dataset is not None:\n if labels is None:\n labels = list(dataset.labels_flat_dict.keys())\n from_json['datasetId'] = dataset.id\n if labels is not None:\n from_json['labels'] = labels\n # if there are new labels - pop the mapping from the original\n _ = from_json['configuration'].pop('id_to_label_map', None)\n _ = from_json['configuration'].pop('label_to_id_map', None)\n if configuration is not None:\n from_json['configuration'].update(configuration)\n if description is not None:\n from_json['description'] = description\n if tags is not None:\n from_json['tags'] = tags\n if scope is not None:\n from_json['scope'] = scope\n if status is not None:\n from_json['status'] = status\n\n metadata = self._set_model_filter(metadata=from_model.metadata,\n train_filter=train_filter,\n validation_filter=validation_filter)\n if metadata['system']:\n from_json['metadata'] = metadata\n success, response = self._client_api.gen_request(req_type='post',\n path='/ml/models/{}/clone'.format(from_model.id),\n json_req=from_json)\n if not success:\n raise exceptions.PlatformException(response)\n new_model = entities.Model.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n package=from_model._package)\n\n if new_model._dataset is not None and new_model._dataset.readonly is False:\n logger.warning(\n \"Model is using an unlocked dataset {!r}. 
Make it readonly for training reproducibility\".format(\n new_model.dataset.name))\n\n return new_model", "def clone_keras_model(target, custom_objects=None):\n new_model = model_from_json(target.to_json(),custom_objects)\n new_model.set_weights(target.get_weights())\n return new_model", "def clone(self):\n return _libsbml.ModelDefinition_clone(self)", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def clone(self, **kwargs):\n return attr.evolve(self, **kwargs)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def copy_model(self, 
tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def clone(self):\n return _libsbml.Submodel_clone(self)", "def clone(self):\n return _libsbml.FbcModelPlugin_clone(self)", "def clone(self):\n return _libsbml.Input_clone(self)", "def clone(self):\n # make copies of my state\n beta = self.beta\n theta = self.theta.clone()\n sigma = self.sigma.clone()\n likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()\n\n # make one and return it\n return type(self)(beta=beta, theta=theta, likelihoods=likelihoods, sigma=sigma)", "def clone(self, **kwargs):\n new_inst = MetaTensor(self.as_tensor().clone(**kwargs))\n new_inst.__dict__ = deepcopy(self.__dict__)\n return new_inst", "def clone(self):\n return _libsbml.ListOfInputs_clone(self)", "def clone(self, *args, **kwargs):\n new_self = copy.copy(self)\n kwargs = self.get_arguments(args, kwargs, onlykeys=True, onlyused=True)\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for key in kwargs:\n if _map_parameters is not None and key in _map_parameters:\n setattr(new_self, _map_parameters[key], kwargs[key])\n else:\n setattr(new_self, key, kwargs[key])\n return new_self", "def clone(self) -> 'State':\n return State(self.sim, state=self.get_state().copy())", "def _try_clone_model(model):\n try:\n return copy.deepcopy(model)\n except Exception:\n warnings.warn(\n \"Failed to clone model. 
Model state might be mutated during verification.\"\n )\n return model", "def clone(self):\n return _libsbml.CompModelPlugin_clone(self)", "def create_original_model():\n model = Sequential()\n model.add(Embedding(max_features,\n embedding_dims,\n input_length=maxlen))\n model.add(Dropout(0.2))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "def clone(self):\n return self.__class__(self.name, *self)", "def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp", "def clone(self):\n\n clone = self.__class__.__new__(self.__class__)\n clone._graph_state = self._graph_state\n clone._molecule_state = self._molecule_state\n return clone", "def clone(self):\n return _libsbml.ExternalModelDefinition_clone(self)", "def copy(self, new_name):\n new_model = dill.loads(dill.dumps(self.model))\n model_fn = lambda: new_model\n return self.__class__(new_name, model_fn)", "def clone(self):\n return _libsbml.MultiModelPlugin_clone(self)", "def clone( self ):\n new = copy( self )\n try: del new.input_changed\n except AttributeError: pass\n return new", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def copy(self) -> \"Pipeline\":\n model = PipelineModel(self._config.as_dict(), vocab=copy.deepcopy(self.vocab))\n config = copy.deepcopy(self._config)\n\n pipeline_copy = Pipeline(model, config)\n pipeline_copy._model.load_state_dict(self._model.state_dict())\n\n return pipeline_copy", "def clone(self):\n return _libsbml.FbcAnd_clone(self)", "def clone(self, *args, **kwargs):\n return self.copy().reset(*args, **kwargs)", "def clone(self):\n return attr.evolve(self, env=dict(self._env))", "def clone(self):\n return _libsbml.ListOfSubmodels_clone(self)", "def copy_model(dst_model, src_model, const=0.0):\n \n params_dst = dst_model.named_parameters()\n params_src = src_model.named_parameters()\n dict_params_dst = dict(params_dst)\n with torch.no_grad():\n for name, param in params_src:\n if name in dict_params_dst:\n # NOTE: Must add a dummy float otherwise only setting 'reference' to old param.data\n dict_params_dst[name].set_(param.data + const)", "def clone(self):\n return _libsbml.LocalParameter_clone(self)", "def new(self, **kwargs):\n return self.__model__(**self._preprocess_params(kwargs))", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = 
{\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def __init__(self,*args,**kwargs):\n super(CompositeModel1D,self).__init__(*args,**kwargs)\n for m in self._models:\n if not isinstance(m,FunctionModel1D):\n raise ModelTypeError('Input model %s is not a 1D model'%m)\n self._filters = None", "def clone(self):\n return _libsbml.ListOfModelDefinitions_clone(self)", "def clone(self, *args):\n return _SALOMERuntime.InputPyPort_clone(self, *args)", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def clone(self):\n raise NotImplementedError", "def clone_with_updates(self, **kwargs):\n fields_dict = self.to_dict()\n fields_dict.update(kwargs)\n return BindingPrediction(**fields_dict)", "def clone(self, first_order=None, allow_unused=None, allow_nograd=None):\n if first_order is None:\n first_order = self.first_order\n if allow_unused is None:\n allow_unused = self.allow_unused\n if allow_nograd is None:\n allow_nograd = self.allow_nograd\n return MAMLpp(\n clone_module(self.module),\n lr=self.lr,\n lrs=clone_named_parameters(self.lrs),\n first_order=first_order,\n allow_unused=allow_unused,\n allow_nograd=allow_nograd,\n )", "def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)", "def clone(self, clone=None):\r\n # copy specs from supplied object\r\n if clone is not None: [setattr(self, v, getattr(clone, v)) for v in vars(clone)]", "def clone(self):\n return attr.evolve(self)", "def clone(self):\n return shallow_clone(self)", "def clone(self):\n return _libsbml.FbcOr_clone(self)", "def clone(self) -> Mutator:\n raise NotImplementedError", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def clone(self):\n return _libsbml.ListOfLocalParameters_clone(self)", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def clone(self):\n return _libsbml.SBMLLocalParameterConverter_clone(self)", "def clones(module, num_copies):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])", "def _clone(self):\n #可见,这样可以将本类初始化参数全部赋给c对象,作为c的属性\n c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)\n c._sticky_filter = self._sticky_filter\n c._for_write = self._for_write\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n c._known_related_objects = self._known_related_objects\n c._iterable_class = self._iterable_class\n c._fields = self._fields\n return c", "def GetClone(self, *args, **kwargs):\n pass", "def clone(self):\n return _libsbml.SBase_clone(self)", "def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)", "def clone(self, *args):\n return _osgAnimation.Bone_clone(self, 
*args)", "def copy(self, **kwargs):\n return Tensor(self.data, **kwargs)", "def make_non_parallel_copy(model):\n def replace_data_parallel(container):\n for name, module in container.named_children():\n if isinstance(module, nn.DataParallel):\n setattr(container, name, module.module)\n if has_children(module):\n replace_data_parallel(module)\n\n # Make a copy of the model, because we're going to change it\n new_model = deepcopy(model)\n if isinstance(new_model, nn.DataParallel):\n new_model = new_model.module\n replace_data_parallel(new_model)\n\n return new_model", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def assign_model(original_model: nn.Module, pruned_model: nn.Module, cfg_mask: list) -> nn.Module:\n old_modules = list(original_model.named_modules())\n new_modules = list(pruned_model.named_modules())\n assert len(old_modules) == len(\n new_modules), f\"expected equal module nums, got {len(old_modules)} v.s. 
{len(new_modules)}\"\n\n first_linear = True\n bn_idx = 0 # the index of output bn mask for conv layers\n for i in range(len(old_modules)):\n old_name, old_module = old_modules[i]\n new_name, new_module = new_modules[i]\n\n assert old_name == new_name, f\"Expected same module name, got {old_name} and {new_name}\"\n\n if isinstance(old_module, nn.BatchNorm2d) or isinstance(old_module, nn.BatchNorm1d):\n idx = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx.size == 1:\n idx = np.resize(idx, (1,))\n\n assert new_module.weight.data.shape == old_module.weight.data[idx.tolist()].clone().shape\n new_module.weight.data = old_module.weight.data[idx.tolist()].clone()\n new_module.bias.data = old_module.bias.data[idx.tolist()].clone()\n new_module.running_mean = old_module.running_mean[idx.tolist()].clone()\n new_module.running_var = old_module.running_var[idx.tolist()].clone()\n\n bn_idx += 1\n pass\n elif isinstance(old_module, nn.Conv2d):\n old_conv_weight = old_module.weight.clone()\n if old_module.bias is not None:\n old_conv_bias = old_module.bias.clone()\n else:\n old_conv_bias = None\n\n # prune input dim\n if bn_idx - 1 != -1:\n # -1 is the first layer of conv, do not prune the input dimension\n idx_input = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx - 1].cpu().numpy())))\n if idx_input.size == 1:\n idx_input = np.resize(idx_input, (1,))\n old_conv_weight = old_conv_weight.data[:, idx_input.tolist(), :, :].clone()\n # prune output dim\n idx_output = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx_output.size == 1:\n idx_output = np.resize(idx_output, (1,))\n old_conv_weight = old_conv_weight.data[idx_output.tolist(), :, :, :].clone()\n if old_conv_bias:\n old_conv_bias = old_conv_bias.data[idx_output.tolist()].clone()\n\n assert old_conv_weight.shape == new_module.weight.shape, f\"Expected same shape to assign, got {old_conv_weight.shape} and {new_module.weight.shape}\"\n if old_conv_bias:\n assert old_conv_bias.shape == new_module.bias.shape, f\"Expected same shape to assigin, got {old_conv_bias.shape} and {new_module.bias.shape}\"\n\n new_module.weight.data = old_conv_weight.clone()\n if old_conv_bias:\n new_module.bias.data = old_conv_bias.clone()\n\n elif isinstance(old_module, nn.Linear):\n old_linear_weight = old_module.weight.clone()\n old_linear_bias = old_module.bias.clone()\n\n # prune the input dimension\n idx_input = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx - 1].cpu().numpy())))\n if idx_input.size == 1:\n idx_input = np.resize(idx_input, (1,))\n # if first_linear:\n # def gen_list(offset):\n # base_list = np.arange(7 * 7)\n # return base_list + offset * 49\n #\n # idx_input = [gen_list(x) for x in idx_input]\n # idx_input = np.concatenate(idx_input)\n # idx_input = np.sort(idx_input)\n # first_linear = False\n old_linear_weight = old_linear_weight.data[:, idx_input.tolist()].clone()\n\n # prune output layer\n if bn_idx == len(cfg_mask):\n # do not prune the output layer\n idx_output = np.arange(old_linear_weight.shape[0])\n else:\n # prune output dim\n idx_output = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx_output.size == 1:\n idx_output = np.resize(idx_output, (1,))\n old_linear_weight = old_linear_weight.data[idx_output.tolist(), :].clone()\n old_linear_bias = old_linear_bias.data[idx_output.tolist()].clone()\n\n assert old_linear_weight.shape == new_module.weight.shape, f\"Expected same shape to assign, got {old_conv_weight.shape} and {new_module.weight.shape}\"\n assert 
old_linear_bias.shape == new_module.bias.shape, f\"Expected same shape to assigin, got {old_conv_bias.shape} and {new_module.bias.shape}\"\n\n new_module.weight.data = old_linear_weight.clone()\n new_module.bias.data = old_linear_bias.clone()\n\n return pruned_model", "def _clone(self):\n c = self.__class__(\n model=self.model,\n query=self.query.chain(),\n using=self._db,\n hints=self._hints,\n )\n c._sticky_filter = self._sticky_filter\n c._for_write = self._for_write\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n c._known_related_objects = self._known_related_objects\n c._iterable_class = self._iterable_class\n c._fields = self._fields\n return c", "def copy(self):\n\n obj = MixedLMParams(self.k_fe, self.k_re, self.use_sqrt)\n obj.set_packed(self.get_packed().copy())\n return obj", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def clone(self):", "def clone(self):\n return self", "def copy(self):\n brain = Brain((self.structure), activation_function=self.activation_function)\n brain.weights = copy.deepcopy(self.weights)\n brain.biases = copy.deepcopy(self.biases)", "def __init__(self, ndf, n_layers, original_model, norm_layer, fc_relu_slope, fc_drop_out):\n super(ModifiedModel, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.features = nn.Sequential(*list(original_model.children())[:-1])\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n\n sequence = []\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 32)\n sequence += [\n nn.Linear(in_features=int(ndf/nf_mult_prev), out_features=int(ndf/nf_mult)),\n norm_layer(int(ndf/nf_mult)),\n nn.LeakyReLU(fc_relu_slope, True),\n nn.Dropout2d(p=fc_drop_out)\n ]\n\n sequence += [nn.Linear(in_features=int(ndf/nf_mult), out_features=1)] # output 1 channel prediction map\n self.linear_group = nn.Sequential(*sequence)", "def __copy__(self):\n # prepare unnamed arguments\n args = [getattr(self, arg) for arg in self._copy_conf['args']]\n\n # prepare named arguments\n kwargs = {}\n for arg in self._copy_conf['kwargs']:\n # if arg is a tuple, the first entry will be the named kwargs, and\n # the second will be the name of the attribute to copy\n name = arg\n if isinstance(arg, tuple):\n name, arg = arg\n if hasattr(self, arg):\n kwargs[name] = getattr(self, arg)\n\n # create the new instance\n new_copy = self.__class__(*args, **kwargs)\n\n # then copy attributes\n for attr_name in self._copy_conf['attrs']:\n if hasattr(self, attr_name):\n setattr(new_copy, attr_name, getattr(self, attr_name))\n\n return new_copy", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def clone(self):\n return _libsbml.FluxObjective_clone(self)", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def create_model() 
-> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def clone(self):\n return _libsbml.ListOfParameters_clone(self)", "def clone(self):\n return self.copy()", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def clone(self) -> Self:\n return clone(self, safe=True)", "def clone(self):\n return _libsbml.LayoutModelPlugin_clone(self)", "def make_clone(self, attrs=None, sub_clone=False):\n attrs = attrs or {}\n if not self.pk:\n raise ValidationError(\n \"{}: Instance must be saved before it can be cloned.\".format(\n self.__class__.__name__\n )\n )\n if sub_clone:\n duplicate = self\n duplicate.pk = None\n else:\n duplicate = self._create_copy_of_instance(self)\n\n for name, value in attrs.items():\n setattr(duplicate, name, value)\n\n duplicate.save()\n\n duplicate = self.__duplicate_o2o_fields(duplicate)\n duplicate = self.__duplicate_o2m_fields(duplicate)\n duplicate = self.__duplicate_m2o_fields(duplicate)\n duplicate = self.__duplicate_m2m_fields(duplicate)\n return duplicate", "def clone(self):\n return _libsbml.Parameter_clone(self)", "def clone(self, *args):\n return _SALOMERuntime.InputStudyPort_clone(self, *args)", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])" ]
[ "0.75288594", "0.71179146", "0.6991728", "0.69553876", "0.6682947", "0.66104007", "0.65559334", "0.64254403", "0.6322556", "0.6287403", "0.62824917", "0.6236048", "0.6216818", "0.6197873", "0.615267", "0.6146778", "0.6113265", "0.5923647", "0.5900907", "0.5888005", "0.58635086", "0.5854518", "0.5847757", "0.58328635", "0.57887185", "0.57576", "0.57119405", "0.55884403", "0.5585717", "0.5584986", "0.55820113", "0.55808306", "0.55780244", "0.55699307", "0.55651516", "0.5557365", "0.5554633", "0.5554173", "0.5543435", "0.55359244", "0.55359066", "0.5534098", "0.5521748", "0.550264", "0.5479082", "0.5469472", "0.5457048", "0.54444873", "0.5441363", "0.543161", "0.54250115", "0.54151195", "0.5412351", "0.5383545", "0.5378581", "0.5364146", "0.5353239", "0.53407043", "0.53355706", "0.53329206", "0.53274345", "0.53163415", "0.53069985", "0.5295926", "0.5289127", "0.52818", "0.52792364", "0.5275009", "0.5265897", "0.5261722", "0.5260925", "0.52424794", "0.5237881", "0.52115923", "0.52115184", "0.52048856", "0.5203346", "0.520053", "0.51993346", "0.51805043", "0.51798373", "0.5171531", "0.5171523", "0.5169748", "0.516382", "0.516019", "0.5159071", "0.5154621", "0.51484585", "0.51409286", "0.5139773", "0.513019", "0.51290894", "0.5127124", "0.51175773", "0.5115635", "0.51013434", "0.50962853", "0.5087924", "0.5087924" ]
0.68227845
4
Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments
def _clone_sequential_model(model, input_tensors=None): if not isinstance(model, Sequential): raise ValueError('Expected `model` argument ' 'to be a `Sequential` model instance, ' 'but got:', model) def clone(layer): return layer.__class__.from_config(layer.get_config()) layers = [clone(layer) for layer in model.layers] if input_tensors is None: return Sequential(layers=layers, name=model.name) else: if len(to_list(input_tensors)) != 1: raise ValueError('To clone a `Sequential` model, we expect ' ' at most one tensor ' 'as part of `input_tensors`.') x = to_list(input_tensors)[0] if K.is_keras_tensor(x): origin_layer = x._keras_history[0] if isinstance(origin_layer, InputLayer): return Sequential(layers=[origin_layer] + layers, name=model.name) else: raise ValueError('Cannot clone a `Sequential` model on top ' 'of a tensor that comes from a Keras layer ' 'other than an `InputLayer`. ' 'Use the functional API instead.') input_tensor = Input(tensor=x, name='input_wrapper_for_' + str(x.name)) input_layer = input_tensor._keras_history[0] return Sequential(layers=[input_layer] + layers, name=model.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone_model(model, input_tensors=None):\n if isinstance(model, Sequential):\n return _clone_sequential_model(model, input_tensors=input_tensors)\n else:\n return _clone_functional_model(model, input_tensors=input_tensors)", "def clone(self):\n return _libsbml.Model_clone(self)", "def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model", "def clone_keras_model(target, custom_objects=None):\n new_model = model_from_json(target.to_json(),custom_objects)\n new_model.set_weights(target.get_weights())\n return new_model", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def create_original_model():\n model = Sequential()\n model.add(Embedding(max_features,\n embedding_dims,\n input_length=maxlen))\n model.add(Dropout(0.2))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", "def _clone_functional_model(model, input_tensors=None):\n if not isinstance(model, Model):\n raise ValueError('Expected `model` argument '\n 'to be a `Model` instance, got ', model)\n if isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a functional `Model` instance, '\n 'got a `Sequential` instance instead:', model)\n\n layer_map = {} # Cache for created layers.\n tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}\n if input_tensors is None:\n # Create placeholders to build the model on top of.\n input_layers = []\n input_tensors = []\n for layer in model.input_layers:\n input_tensor = Input(batch_shape=layer.batch_input_shape,\n dtype=layer.dtype,\n sparse=layer.sparse,\n name=layer.name)\n input_tensors.append(input_tensor)\n # Cache newly created input layer.\n newly_created_input_layer = input_tensor._keras_history[0]\n layer_map[layer] = newly_created_input_layer\n for original_input_layer, cloned_input_layer in zip(model.input_layers, input_layers):\n layer_map[original_input_layer] = cloned_input_layer\n else:\n # Make sure that all input tensors come from a Keras layer.\n # If tensor comes from an input layer: cache the input layer.\n input_tensors = to_list(input_tensors)\n _input_tensors = []\n for i, x in enumerate(input_tensors):\n if not K.is_keras_tensor(x):\n name = model.input_layers[i].name\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + name)\n _input_tensors.append(input_tensor)\n # Cache newly created input layer.\n original_input_layer = x._keras_history[0]\n newly_created_input_layer = input_tensor._keras_history[0]\n 
layer_map[original_input_layer] = newly_created_input_layer\n else:\n _input_tensors.append(x)\n input_tensors = _input_tensors\n\n for x, y in zip(model.inputs, input_tensors):\n tensor_map[x] = (y, None) # tensor, mask\n\n # Iterated over every node in the reference model, in depth order.\n depth_keys = list(model._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = model._nodes_by_depth[depth]\n for node in nodes:\n # Recover the corresponding layer.\n layer = node.outbound_layer\n\n # Get or create layer.\n if layer not in layer_map:\n # Clone layer.\n new_layer = layer.__class__.from_config(layer.get_config())\n layer_map[layer] = new_layer\n layer = new_layer\n else:\n # Reuse previously cloned layer.\n layer = layer_map[layer]\n # Don't call InputLayer multiple times.\n if isinstance(layer, InputLayer):\n continue\n\n # Gather inputs to call the new layer.\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if x in tensor_map:\n computed_data.append(tensor_map[x])\n\n if len(computed_data) == len(reference_input_tensors):\n # Call layer.\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_mask\n output_tensors = to_list(\n layer(computed_tensor, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensor,\n computed_mask))\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_masks\n output_tensors = to_list(\n layer(computed_tensors, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensors,\n computed_masks))\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors,\n output_tensors,\n output_masks):\n tensor_map[x] = (y, mask)\n\n # Check that we did compute the model outputs,\n # then instantiate a new model from inputs and outputs.\n output_tensors = []\n for x in model.outputs:\n assert x in tensor_map, 'Could not compute output ' + str(x)\n tensor, _ = tensor_map[x]\n output_tensors.append(tensor)\n return Model(input_tensors, output_tensors, name=model.name)", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def clone(self, camera = None, light = None):\r\n newModel = Model(file_string = \"__clone__\", x=self.unif[0], y=self.unif[1], z=self.unif[2],\r\n rx=self.unif[3], ry=self.unif[4], rz=self.unif[5], sx=self.unif[6], sy=self.unif[7], 
sz=self.unif[8],\r\n cx=self.unif[9], cy=self.unif[10], cz=self.unif[11])\r\n newModel.buf = self.buf\r\n newModel.vGroup = self.vGroup\r\n newModel.shader = self.shader\r\n newModel.textures = self.textures\r\n return newModel", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def sequential_model():\n model = build_models()\n seq_model = Sequential(model[0]['layers'], name=model[0]['name'])\n return seq_model", "def clone(self,\n from_model: entities.Model,\n model_name: str,\n dataset: entities.Dataset = None,\n configuration: dict = None,\n status=None,\n scope=None,\n project_id: str = None,\n labels: list = None,\n description: str = None,\n tags: list = None,\n train_filter: entities.Filters = None,\n validation_filter: entities.Filters = None,\n ) -> entities.Model:\n from_json = {\"name\": model_name,\n \"packageId\": from_model.package_id,\n \"configuration\": from_model.configuration,\n \"metadata\": from_model.metadata,\n \"outputType\": from_model.output_type,\n \"inputType\": from_model.input_type}\n if project_id is None:\n project_id = self.project.id\n from_json['projectId'] = project_id\n if dataset is not None:\n if labels is None:\n labels = list(dataset.labels_flat_dict.keys())\n from_json['datasetId'] = dataset.id\n if labels is not None:\n from_json['labels'] = labels\n # if there are new labels - pop the mapping from the original\n _ = from_json['configuration'].pop('id_to_label_map', None)\n _ = from_json['configuration'].pop('label_to_id_map', None)\n if configuration is not None:\n from_json['configuration'].update(configuration)\n if description is not None:\n from_json['description'] = description\n if tags is not None:\n from_json['tags'] = tags\n if scope is not None:\n from_json['scope'] = scope\n if status is not None:\n from_json['status'] = status\n\n metadata = self._set_model_filter(metadata=from_model.metadata,\n train_filter=train_filter,\n validation_filter=validation_filter)\n if metadata['system']:\n from_json['metadata'] = metadata\n success, response = self._client_api.gen_request(req_type='post',\n path='/ml/models/{}/clone'.format(from_model.id),\n json_req=from_json)\n if not success:\n raise exceptions.PlatformException(response)\n new_model = entities.Model.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n package=from_model._package)\n\n if new_model._dataset is not None and new_model._dataset.readonly is False:\n logger.warning(\n \"Model is using an unlocked dataset {!r}. 
Make it readonly for training reproducibility\".format(\n new_model.dataset.name))\n\n return new_model", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def clone(self):\n return _libsbml.ModelDefinition_clone(self)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def clone(self):\n return _libsbml.Submodel_clone(self)", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n self.model.add(layer.toKerasFn())\n\n\n 
# super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def iris():\n n = keras.models.clone_model(iris_model)\n n.compile('adam', 'sparse_categorical_crossentropy')\n return n", "def clone(self):\n return _libsbml.Input_clone(self)", "def copy(self) -> \"Pipeline\":\n model = PipelineModel(self._config.as_dict(), vocab=copy.deepcopy(self.vocab))\n config = copy.deepcopy(self._config)\n\n pipeline_copy = Pipeline(model, config)\n pipeline_copy._model.load_state_dict(self._model.state_dict())\n\n return pipeline_copy", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def make_non_parallel_copy(model):\n def replace_data_parallel(container):\n for name, module in container.named_children():\n if isinstance(module, nn.DataParallel):\n setattr(container, name, module.module)\n if has_children(module):\n replace_data_parallel(module)\n\n # Make a copy of the model, because we're going to change it\n new_model = deepcopy(model)\n if isinstance(new_model, nn.DataParallel):\n new_model = new_model.module\n replace_data_parallel(new_model)\n\n return new_model", "def clone(self, **kwargs):\n return attr.evolve(self, **kwargs)", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def clones(module, num_copies):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])", "def clone(self) -> 'State':\n return State(self.sim, state=self.get_state().copy())", "def _try_clone_model(model):\n try:\n 
return copy.deepcopy(model)\n except Exception:\n warnings.warn(\n \"Failed to clone model. Model state might be mutated during verification.\"\n )\n return model", "def clone(self):\n return _libsbml.ListOfInputs_clone(self)", "def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model", "def create_model():\n model = Sequential()\n\n model.add(Dense(18, input_dim=9, kernel_initializer='normal', activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(9, kernel_initializer='normal', activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(1, kernel_initializer='normal'))\n\n learning_rate = 0.001\n momentum = 0.8\n sgd = SGD(lr=learning_rate, momentum=momentum, nesterov=False)\n model.compile(loss='mean_squared_error', optimizer=sgd)\n model.summary()\n return model", "def clone( self ):\n new = copy( self )\n try: del new.input_changed\n except AttributeError: pass\n return new", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp", "def create_model():\n model = Sequential()\n\n 
model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def get_model(keep_ratio=0.25):\n model = Sequential()\n # Standardize images\n model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\n # Crop images\n model.add(Cropping2D(cropping=((70,25),(0,0)))) #crop images to isolate road lines\n\n # Convolution layer\n model.add(Convolution2D(24,5,5,subsample=(2,2),activation='relu'))\n # Convolution layer\n model.add(Convolution2D(36,5,5,subsample=(2,2),activation='relu'))\n # Convolution layer\n model.add(Convolution2D(48,5,5,subsample=(2,2),activation='relu'))\n # Convolution layer\n model.add(Convolution2D(64,3,3,activation='relu'))\n # Convolution layer\n model.add(Convolution2D(64,3,3,activation='relu'))\n\n # Flatten layers\n model.add(Flatten())\n model.add(Dense(100))\n # Fully-connected layer with Dropout to avoid over-fitting\n model.add(Dropout(keep_ratio))\n model.add(Dense(50))\n # Fully-connected layer with Dropout to avoid over-fitting\n model.add(Dropout(keep_ratio))\n model.add(Dense(20))\n # Fully-connected layer with Dropout to avoid over-fitting\n model.add(Dropout(keep_ratio))\n model.add(Dense(10))\n model.add(Dense(1))\n return model", "def clone(self):\n return _libsbml.ListOfSubmodels_clone(self)", "def copy_state(model):\n copy_dict = OrderedDict()\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n copy_dict[k] = v.cpu() if v.is_cuda else v.clone()\n\n return copy_dict", "def make_model():\n model = Sequential()\n model.add(Dense(1000, input_shape=(INPUT_SIZE,), activation='relu'))\n model.add(Dense(1000, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', metrics=['accuracy'])\n return model", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def _model(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n\t\tmodel.add(Dense(units=32, activation=\"relu\"))\n\t\tmodel.add(Dense(units=16, activation=\"relu\"))\n\t\tmodel.add(Dense(units=8, activation=\"relu\"))\n\t\tmodel.add(Dense(self.action_size, activation=\"linear\"))\n\t\tmodel.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n\n\t\treturn model", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, 
input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def clone(self, *args):\n return _osgAnimation.Bone_clone(self, *args)", "def clone(self):\n\n clone = self.__class__.__new__(self.__class__)\n clone._graph_state = self._graph_state\n clone._molecule_state = self._molecule_state\n return clone", "def train_model(self, model, data) -> keras.Model:\n self.history = model.fit(\n self.generator.flow(data.x.train, data.y.train),\n epochs=self.N_epochs,\n validation_data=(data.x.valid, data.y.valid),\n verbose=1,\n steps_per_epoch=int(np.floor(data.x.train.shape[0] / self.batch_size)),\n callbacks=self.callbacks,\n shuffle=True,\n )\n\n return model", "def clone(self):\n return attr.evolve(self, env=dict(self._env))", "def randomize_layers(nb_layers, old_model, model_type='Model'):\n \n config = old_model.get_config()\n if model_type=='Model':\n new_model = Model.from_config(config)\n elif model_type=='Sequential':\n new_model = Sequential.from_config(config)\n else:\n print('Wrong parameter, model can only be Sequential or Model.')\n\n if nb_layers==-1:\n nb_layers = len(new_model.layers)\n else:\n nb_layers = min(nb_layers, len(new_model.layers))\n\n # Copy the weights of the non-randomized layers.\n for layer_i in range(len(new_model.layers) - nb_layers):\n new_model.layers[layer_i].set_weights(old_model.layers[layer_i].get_weights())\n\n del old_model\n\n return new_model", "def clone(self, first_order=None, allow_unused=None, allow_nograd=None):\n if first_order is None:\n first_order = self.first_order\n if allow_unused is None:\n allow_unused = self.allow_unused\n if allow_nograd is None:\n allow_nograd = self.allow_nograd\n return MAMLpp(\n clone_module(self.module),\n lr=self.lr,\n lrs=clone_named_parameters(self.lrs),\n first_order=first_order,\n allow_unused=allow_unused,\n allow_nograd=allow_nograd,\n )", "def clone(self, *args):\n return _SALOMERuntime.InputPyPort_clone(self, *args)", "def model(self):\n model = tf.keras.Model(\n inputs=[self.input_user, self.input_item], outputs=self.predict())\n return model", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def create_model():\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(1024, 1024, 1)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.25))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.25))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(2, activation='softmax'))\n\n model.summary()\n\n model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n return model", "def clone(self, *args):\n return _SALOMERuntime.InputCorbaPort_clone(self, *args)", "def clone(self):\n return self.__class__(self.name, *self)", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def clone(self, *args):\n return _SALOMERuntime.InputStudyPort_clone(self, *args)", "def clone(self):\n # make copies of my state\n beta = self.beta\n 
theta = self.theta.clone()\n sigma = self.sigma.clone()\n likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()\n\n # make one and return it\n return type(self)(beta=beta, theta=theta, likelihoods=likelihoods, sigma=sigma)", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clone(self, *args, **kwargs):\n return self.copy().reset(*args, **kwargs)", "def clone(self):\n return _libsbml.ExternalModelDefinition_clone(self)", "def clone(self):\n raise NotImplementedError", "def create_model(training, layers, activation='relu',\n loss='mean_absolute_error', optimizer='adam'):\n\n model = Sequential()\n\n # Add the first layer with the input dimension\n model.add(Dense(layers[0], activation=activation, input_dim=1))\n\n for number_of_neurons in layers[1:]:\n model.add(Dense(number_of_neurons, activation=activation))\n\n # Specify that there is only one returned value\n model.add(Dense(1, activation=activation))\n\n model.compile(loss=loss, optimizer=optimizer)\n\n return model", "def replicate_model_state(model_states: TrainState) -> TrainState:\n return jax.device_put_replicated(model_states, jax.local_devices())", "def build_model(self):\n self.model = models.Sequential()\n self.model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n self.model.add(layers.Flatten())\n self.model.add(layers.Dense(64, activation='relu'))\n self.model.add(layers.Dense(10, activation='softmax'))\n self.model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "def create_cnn_model() -> tf.keras.Sequential:\n model = Sequential(\n [\n layers.Convolution1D(filters=16, kernel_size=7, input_shape=[186, 1]),\n layers.LeakyReLU(),\n layers.MaxPool1D(pool_size=2),\n layers.Convolution1D(filters=16, kernel_size=5),\n layers.LeakyReLU(),\n layers.Convolution1D(filters=16, kernel_size=5),\n layers.LeakyReLU(),\n layers.Convolution1D(filters=16, kernel_size=5),\n layers.LeakyReLU(),\n layers.MaxPool1D(pool_size=2),\n layers.Flatten(),\n layers.Dense(128),\n layers.LeakyReLU(),\n layers.Dense(5, activation=\"softmax\"),\n ]\n )\n return model", "def clone(*args, **kwds) -> None:\n command = command_clone(*args, **kwds)\n io.communicate(command)", "def clone(self, **kwargs):\n new_inst = MetaTensor(self.as_tensor().clone(**kwargs))\n new_inst.__dict__ = deepcopy(self.__dict__)\n return new_inst", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n for i in range(len(layers)):\n model.add(K.layers.Dense(layers[i],\n activation=activations[i],\n input_shape=(nx,),\n kernel_regularizer=K.regularizers.l2(lambtha)))\n if i + 1 < len(layers):\n model.add(K.layers.Dropout(1 - keep_prob))\n return model", "def create_model_tail(self, model):\n # Creating a sequential model to at as top layers\n top_model = keras.Sequential()\n top_model.add(keras.layers.Flatten(input_shape=model.output_shape[1:]))\n\n # Add multiple layers\n for layer_num, layer_dim in enumerate(self.num_nodes):\n 
top_model.add(keras.layers.Dense(layer_dim, activation=self.activation))\n top_model.add(keras.layers.Dropout(self.dropouts[layer_num]))\n \n top_model.add(keras.layers.Dense(self.n_classes, activation=self.output_act))\n return top_model", "def clone(self):\n return _libsbml.ListOfModelDefinitions_clone(self)", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def copy_model_parameters(sess, net1, net2):\n\n copy_scope_parameters(sess, net1.scope, net2.scope)", "def clone(self):\n return attr.evolve(self)", "def nn_model():\n seed = 321\n np.random.seed(seed)\n rmsprop = RMSprop(lr=0.0001)\n # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n # kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n # for train, test in kfold.split(X, y):\n model_nn = Sequential()\n model_nn.add(Dense(100, input_shape=(117,), activation='relu'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(125, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(30, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(1, activation='sigmoid'))#softmax\n model_nn.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n #model_nn.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n # Compile model\n model_nn.compile(optimizer=rmsprop, loss='binary_crossentropy', metrics=['accuracy'])\n return model_nn", "def create_model_net(n_input,n_hidden,n_output):\n net = Sequential(\n L.Linear(n_input, n_hidden), F.relu,\n L.Linear(n_hidden, n_hidden), F.relu,\n L.Linear(n_hidden, n_output), F.softmax)\n return net", "def train_model(network, data, labels, batch_size, epochs,\n validation_data=None, verbose=True, shuffle=False):\n history = network.fit(x=data, y=labels,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=validation_data,\n verbose=verbose,\n shuffle=shuffle)\n return history", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def assign_model(original_model: nn.Module, pruned_model: nn.Module, cfg_mask: list) -> nn.Module:\n old_modules = list(original_model.named_modules())\n new_modules = list(pruned_model.named_modules())\n assert len(old_modules) == len(\n new_modules), f\"expected equal module nums, got {len(old_modules)} v.s. 
{len(new_modules)}\"\n\n first_linear = True\n bn_idx = 0 # the index of output bn mask for conv layers\n for i in range(len(old_modules)):\n old_name, old_module = old_modules[i]\n new_name, new_module = new_modules[i]\n\n assert old_name == new_name, f\"Expected same module name, got {old_name} and {new_name}\"\n\n if isinstance(old_module, nn.BatchNorm2d) or isinstance(old_module, nn.BatchNorm1d):\n idx = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx.size == 1:\n idx = np.resize(idx, (1,))\n\n assert new_module.weight.data.shape == old_module.weight.data[idx.tolist()].clone().shape\n new_module.weight.data = old_module.weight.data[idx.tolist()].clone()\n new_module.bias.data = old_module.bias.data[idx.tolist()].clone()\n new_module.running_mean = old_module.running_mean[idx.tolist()].clone()\n new_module.running_var = old_module.running_var[idx.tolist()].clone()\n\n bn_idx += 1\n pass\n elif isinstance(old_module, nn.Conv2d):\n old_conv_weight = old_module.weight.clone()\n if old_module.bias is not None:\n old_conv_bias = old_module.bias.clone()\n else:\n old_conv_bias = None\n\n # prune input dim\n if bn_idx - 1 != -1:\n # -1 is the first layer of conv, do not prune the input dimension\n idx_input = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx - 1].cpu().numpy())))\n if idx_input.size == 1:\n idx_input = np.resize(idx_input, (1,))\n old_conv_weight = old_conv_weight.data[:, idx_input.tolist(), :, :].clone()\n # prune output dim\n idx_output = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx_output.size == 1:\n idx_output = np.resize(idx_output, (1,))\n old_conv_weight = old_conv_weight.data[idx_output.tolist(), :, :, :].clone()\n if old_conv_bias:\n old_conv_bias = old_conv_bias.data[idx_output.tolist()].clone()\n\n assert old_conv_weight.shape == new_module.weight.shape, f\"Expected same shape to assign, got {old_conv_weight.shape} and {new_module.weight.shape}\"\n if old_conv_bias:\n assert old_conv_bias.shape == new_module.bias.shape, f\"Expected same shape to assigin, got {old_conv_bias.shape} and {new_module.bias.shape}\"\n\n new_module.weight.data = old_conv_weight.clone()\n if old_conv_bias:\n new_module.bias.data = old_conv_bias.clone()\n\n elif isinstance(old_module, nn.Linear):\n old_linear_weight = old_module.weight.clone()\n old_linear_bias = old_module.bias.clone()\n\n # prune the input dimension\n idx_input = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx - 1].cpu().numpy())))\n if idx_input.size == 1:\n idx_input = np.resize(idx_input, (1,))\n # if first_linear:\n # def gen_list(offset):\n # base_list = np.arange(7 * 7)\n # return base_list + offset * 49\n #\n # idx_input = [gen_list(x) for x in idx_input]\n # idx_input = np.concatenate(idx_input)\n # idx_input = np.sort(idx_input)\n # first_linear = False\n old_linear_weight = old_linear_weight.data[:, idx_input.tolist()].clone()\n\n # prune output layer\n if bn_idx == len(cfg_mask):\n # do not prune the output layer\n idx_output = np.arange(old_linear_weight.shape[0])\n else:\n # prune output dim\n idx_output = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx_output.size == 1:\n idx_output = np.resize(idx_output, (1,))\n old_linear_weight = old_linear_weight.data[idx_output.tolist(), :].clone()\n old_linear_bias = old_linear_bias.data[idx_output.tolist()].clone()\n\n assert old_linear_weight.shape == new_module.weight.shape, f\"Expected same shape to assign, got {old_conv_weight.shape} and {new_module.weight.shape}\"\n assert 
old_linear_bias.shape == new_module.bias.shape, f\"Expected same shape to assigin, got {old_conv_bias.shape} and {new_module.bias.shape}\"\n\n new_module.weight.data = old_linear_weight.clone()\n new_module.bias.data = old_linear_bias.clone()\n\n return pruned_model", "def clone(self):\n return _libsbml.Port_clone(self)", "def keras_model_1(X_train, y_train, max_epochs=20, batch_size=16, train_size=0.85):\n num_classes = len(np.unique(y_train))\n num_features = X_train.shape[1]\n\n print(\"Building model...\")\n\n model = Sequential()\n model.add(Dense(num_features, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, num_classes, init='glorot_uniform'))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.4, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd)\n\n print(\"Training model...\")\n X = X_train\n y = np_utils.to_categorical(y_train)\n history = model.fit(X, y, nb_epoch=max_epochs, batch_size=batch_size, verbose=2, validation_split=1-train_size, show_accuracy=True)\n\n return model, history", "def clone(self, *args):\n return _osgAnimation.Animation_clone(self, *args)", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def clones(module: nn.Module, n: int) -> nn.ModuleList:\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def get_model(imgsize: ImageSize, classes_count: int) -> keras.Model:\n data_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal\", input_shape=(imgsize.height, imgsize.width, 3)),\n layers.experimental.preprocessing.RandomRotation(0.1),\n layers.experimental.preprocessing.RandomZoom(0.1),\n ]\n )\n\n model = 
Sequential([\n data_augmentation,\n layers.experimental.preprocessing.Rescaling(1. / 255),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.2),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(classes_count)\n ])\n\n model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n return model", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def clone(self):\n return self.copy()" ]
[ "0.7385709", "0.66616267", "0.6578116", "0.6526493", "0.6467478", "0.62646365", "0.62586915", "0.6220581", "0.6103913", "0.60691476", "0.6056226", "0.6018872", "0.6015779", "0.6011683", "0.5961887", "0.5907166", "0.58924145", "0.5868687", "0.58646977", "0.584091", "0.58357793", "0.5659199", "0.5609348", "0.55329376", "0.5524156", "0.55141765", "0.5498603", "0.5490629", "0.54903555", "0.5490342", "0.5473767", "0.5472083", "0.5452118", "0.5433239", "0.5429733", "0.5414403", "0.5390009", "0.5374494", "0.5339394", "0.5326307", "0.5316779", "0.53166896", "0.53166896", "0.53166896", "0.53166896", "0.5306387", "0.5262748", "0.5253223", "0.52361906", "0.5227664", "0.5221133", "0.521726", "0.5209979", "0.51811236", "0.51722586", "0.51668406", "0.5164817", "0.515747", "0.5157154", "0.5154796", "0.5153449", "0.5134207", "0.5130318", "0.5125128", "0.51171917", "0.5115843", "0.51154053", "0.51136327", "0.5110568", "0.50981516", "0.50981516", "0.50981516", "0.50959474", "0.5092789", "0.5090594", "0.5080395", "0.5076125", "0.5073877", "0.50702804", "0.5063082", "0.5052437", "0.50474906", "0.50368863", "0.50360584", "0.5025738", "0.5018589", "0.5018546", "0.5008587", "0.5006714", "0.5005809", "0.5004862", "0.49981448", "0.49867025", "0.4986699", "0.4983088", "0.49700037", "0.49687016", "0.49635395", "0.49563825", "0.4955194" ]
0.7651821
0
Clone any `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments
def clone_model(model, input_tensors=None):
    if isinstance(model, Sequential):
        return _clone_sequential_model(model, input_tensors=input_tensors)
    else:
        return _clone_functional_model(model, input_tensors=input_tensors)
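A minimal usage sketch, assuming the public `keras.models.clone_model` API behaves as described above: the clone is built from new layers with freshly initialized weights, so the original weights must be copied explicitly and the clone re-compiled before use.

# Usage sketch (assumes a standard Keras installation; model shapes are illustrative)
from keras.models import Sequential, clone_model
from keras.layers import Dense

original = Sequential([Dense(4, activation='relu', input_shape=(8,)), Dense(1)])
clone = clone_model(original)              # new layers, freshly initialized weights
clone.set_weights(original.get_weights())  # optionally copy the original weights over
clone.compile(optimizer='adam', loss='mse')  # a clone must be re-compiled before training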
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\n return _libsbml.Model_clone(self)", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def _clone_sequential_model(model, input_tensors=None):\n if not isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a `Sequential` model instance, '\n 'but got:', model)\n\n def clone(layer):\n return layer.__class__.from_config(layer.get_config())\n\n layers = [clone(layer) for layer in model.layers]\n if input_tensors is None:\n return Sequential(layers=layers, name=model.name)\n else:\n if len(to_list(input_tensors)) != 1:\n raise ValueError('To clone a `Sequential` model, we expect '\n ' at most one tensor '\n 'as part of `input_tensors`.')\n x = to_list(input_tensors)[0]\n if K.is_keras_tensor(x):\n origin_layer = x._keras_history[0]\n if isinstance(origin_layer, InputLayer):\n return Sequential(layers=[origin_layer] + layers,\n name=model.name)\n else:\n raise ValueError('Cannot clone a `Sequential` model on top '\n 'of a tensor that comes from a Keras layer '\n 'other than an `InputLayer`. '\n 'Use the functional API instead.')\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + str(x.name))\n input_layer = input_tensor._keras_history[0]\n return Sequential(layers=[input_layer] + layers, name=model.name)", "def clone(self, camera = None, light = None):\r\n newModel = Model(file_string = \"__clone__\", x=self.unif[0], y=self.unif[1], z=self.unif[2],\r\n rx=self.unif[3], ry=self.unif[4], rz=self.unif[5], sx=self.unif[6], sy=self.unif[7], sz=self.unif[8],\r\n cx=self.unif[9], cy=self.unif[10], cz=self.unif[11])\r\n newModel.buf = self.buf\r\n newModel.vGroup = self.vGroup\r\n newModel.shader = self.shader\r\n newModel.textures = self.textures\r\n return newModel", "def clone_keras_model(target, custom_objects=None):\n new_model = model_from_json(target.to_json(),custom_objects)\n new_model.set_weights(target.get_weights())\n return new_model", "def clone(self,\n from_model: entities.Model,\n model_name: str,\n dataset: entities.Dataset = None,\n configuration: dict = None,\n status=None,\n scope=None,\n project_id: str = None,\n labels: list = None,\n description: str = None,\n tags: list = None,\n train_filter: entities.Filters = None,\n validation_filter: entities.Filters = None,\n ) -> entities.Model:\n from_json = {\"name\": model_name,\n \"packageId\": from_model.package_id,\n \"configuration\": from_model.configuration,\n \"metadata\": from_model.metadata,\n \"outputType\": from_model.output_type,\n \"inputType\": from_model.input_type}\n if project_id is None:\n project_id = self.project.id\n from_json['projectId'] = project_id\n if dataset is not None:\n if labels is None:\n labels = list(dataset.labels_flat_dict.keys())\n from_json['datasetId'] = dataset.id\n if labels is not None:\n from_json['labels'] = labels\n # if there are new labels - pop the mapping from the original\n _ = from_json['configuration'].pop('id_to_label_map', None)\n _ = from_json['configuration'].pop('label_to_id_map', None)\n if configuration is not None:\n from_json['configuration'].update(configuration)\n if description is not None:\n from_json['description'] = description\n if tags is not None:\n from_json['tags'] = tags\n if scope is not None:\n from_json['scope'] = scope\n if status is not None:\n from_json['status'] = status\n\n 
metadata = self._set_model_filter(metadata=from_model.metadata,\n train_filter=train_filter,\n validation_filter=validation_filter)\n if metadata['system']:\n from_json['metadata'] = metadata\n success, response = self._client_api.gen_request(req_type='post',\n path='/ml/models/{}/clone'.format(from_model.id),\n json_req=from_json)\n if not success:\n raise exceptions.PlatformException(response)\n new_model = entities.Model.from_json(_json=response.json(),\n client_api=self._client_api,\n project=self._project,\n package=from_model._package)\n\n if new_model._dataset is not None and new_model._dataset.readonly is False:\n logger.warning(\n \"Model is using an unlocked dataset {!r}. Make it readonly for training reproducibility\".format(\n new_model.dataset.name))\n\n return new_model", "def copy(self):\r\n clone = NeuralNet()\r\n for layer in self.layers:\r\n clone.layers.append(layer.copy())\r\n return clone", "def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model", "def copy(self):\n copyPreprocessors = []\n copyModels = []\n try:\n #package is defined here once and passed to _cloneObject.\n #When further modules are required, further imports will be necessary\n moduleObject = {\"sklearn\": importlib.import_module(\"sklearn.base\")}\n except(ImportError):\n moduleObject = None\n for preprocessor in self.preprocessors:\n copyPrep = self._cloneObject(preprocessor, moduleObject=moduleObject)\n copyPreprocessors.append(copyPrep)\n\n for model in self.models:\n copyModel = self._cloneObject(model, moduleObject=moduleObject)\n copyModels.append(copyModel)\n return Layer(models=copyModels, preprocessors=copyPreprocessors)", "def clone(self):\n return _libsbml.ModelDefinition_clone(self)", "def _clone_functional_model(model, input_tensors=None):\n if not isinstance(model, Model):\n raise ValueError('Expected `model` argument '\n 'to be a `Model` instance, got ', model)\n if isinstance(model, Sequential):\n raise ValueError('Expected `model` argument '\n 'to be a functional `Model` instance, '\n 'got a `Sequential` instance instead:', model)\n\n layer_map = {} # Cache for created layers.\n tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}\n if input_tensors is None:\n # Create placeholders to build the model on top of.\n input_layers = []\n input_tensors = []\n for layer in model.input_layers:\n input_tensor = Input(batch_shape=layer.batch_input_shape,\n dtype=layer.dtype,\n sparse=layer.sparse,\n name=layer.name)\n input_tensors.append(input_tensor)\n # Cache newly created input layer.\n newly_created_input_layer = input_tensor._keras_history[0]\n layer_map[layer] = newly_created_input_layer\n for original_input_layer, cloned_input_layer in zip(model.input_layers, input_layers):\n layer_map[original_input_layer] = cloned_input_layer\n else:\n # Make sure that all input tensors come from a Keras layer.\n # If tensor comes from an input layer: cache the input 
layer.\n input_tensors = to_list(input_tensors)\n _input_tensors = []\n for i, x in enumerate(input_tensors):\n if not K.is_keras_tensor(x):\n name = model.input_layers[i].name\n input_tensor = Input(tensor=x,\n name='input_wrapper_for_' + name)\n _input_tensors.append(input_tensor)\n # Cache newly created input layer.\n original_input_layer = x._keras_history[0]\n newly_created_input_layer = input_tensor._keras_history[0]\n layer_map[original_input_layer] = newly_created_input_layer\n else:\n _input_tensors.append(x)\n input_tensors = _input_tensors\n\n for x, y in zip(model.inputs, input_tensors):\n tensor_map[x] = (y, None) # tensor, mask\n\n # Iterated over every node in the reference model, in depth order.\n depth_keys = list(model._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n for depth in depth_keys:\n nodes = model._nodes_by_depth[depth]\n for node in nodes:\n # Recover the corresponding layer.\n layer = node.outbound_layer\n\n # Get or create layer.\n if layer not in layer_map:\n # Clone layer.\n new_layer = layer.__class__.from_config(layer.get_config())\n layer_map[layer] = new_layer\n layer = new_layer\n else:\n # Reuse previously cloned layer.\n layer = layer_map[layer]\n # Don't call InputLayer multiple times.\n if isinstance(layer, InputLayer):\n continue\n\n # Gather inputs to call the new layer.\n reference_input_tensors = node.input_tensors\n reference_output_tensors = node.output_tensors\n\n # If all previous input tensors are available in tensor_map,\n # then call node.inbound_layer on them.\n computed_data = [] # List of tuples (input, mask).\n for x in reference_input_tensors:\n if x in tensor_map:\n computed_data.append(tensor_map[x])\n\n if len(computed_data) == len(reference_input_tensors):\n # Call layer.\n if node.arguments:\n kwargs = node.arguments\n else:\n kwargs = {}\n if len(computed_data) == 1:\n computed_tensor, computed_mask = computed_data[0]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_mask\n output_tensors = to_list(\n layer(computed_tensor, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensor,\n computed_mask))\n computed_tensors = [computed_tensor]\n computed_masks = [computed_mask]\n else:\n computed_tensors = [x[0] for x in computed_data]\n computed_masks = [x[1] for x in computed_data]\n if has_arg(layer.call, 'mask'):\n if 'mask' not in kwargs:\n kwargs['mask'] = computed_masks\n output_tensors = to_list(\n layer(computed_tensors, **kwargs))\n output_masks = to_list(\n layer.compute_mask(computed_tensors,\n computed_masks))\n # Update tensor_map.\n for x, y, mask in zip(reference_output_tensors,\n output_tensors,\n output_masks):\n tensor_map[x] = (y, mask)\n\n # Check that we did compute the model outputs,\n # then instantiate a new model from inputs and outputs.\n output_tensors = []\n for x in model.outputs:\n assert x in tensor_map, 'Could not compute output ' + str(x)\n tensor, _ = tensor_map[x]\n output_tensors.append(tensor)\n return Model(input_tensors, output_tensors, name=model.name)", "def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy", "def clone(self):\r\n obj = CylinderModel()\r\n obj.params = copy.deepcopy(self.params)\r\n return obj", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def copy(self):\n model = LBM(\n n_row_clusters=self.n_row_clusters,\n 
n_column_clusters=self.n_column_clusters,\n max_iter=self.max_iter,\n n_init=self.n_init,\n n_init_total_run=self.n_init_total_run,\n n_iter_early_stop=self.nb_iter_early_stop,\n rtol=self.rtol,\n atol=self.atol,\n verbosity=self.verbosity,\n use_gpu=self.use_gpu,\n gpu_index=self.gpu_index,\n )\n model._nb_rows = self._nb_rows\n model._nb_cols = self._nb_cols\n model.loglikelihood_ = self.loglikelihood_\n model._np = self._np\n model._cupyx = self._cupyx\n model.trained_successfully_ = self.trained_successfully_\n model.pi_ = copy.copy(self.pi_)\n model.alpha_1_ = copy.copy(self.alpha_1_)\n model.alpha_2_ = copy.copy(self.alpha_2_)\n model.tau_1_ = copy.copy(self.tau_1_)\n model.tau_2_ = copy.copy(self.tau_2_)\n return model", "def copy(self):\r\n clone = NeuralNetLayer(self.input_size, self.output_size)\r\n clone.weights = self.weights.copy()\r\n return clone", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def deepcopy(self):\r\n newNN = NeuralNetwork(self.max_epochs, self.loss, self.metric, self.momentum_rate,\r\n self.regularization_rate, self.type, self.batch_size, self.type_classifier)\r\n [newNN.add_layer(layer.deepcopy()) for layer in self.layers]\r\n return newNN", "def clone(self):\n return _libsbml.Submodel_clone(self)", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def clone(self):\n return _libsbml.Input_clone(self)", "def _try_clone_model(model):\n try:\n return copy.deepcopy(model)\n except Exception:\n warnings.warn(\n \"Failed to clone model. 
Model state might be mutated during verification.\"\n )\n return model", "def clone(self, **kwargs):\n return attr.evolve(self, **kwargs)", "def clone(self):\n return _libsbml.FbcModelPlugin_clone(self)", "def clone(self):\n return _libsbml.ListOfInputs_clone(self)", "def clone(self):\n return _libsbml.ListOfSubmodels_clone(self)", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def clone(self, *args, **kwargs):\n return self.copy().reset(*args, **kwargs)", "def clone(self):\n return _libsbml.MultiModelPlugin_clone(self)", "def clone(self, clone=None):\r\n # copy specs from supplied object\r\n if clone is not None: [setattr(self, v, getattr(clone, v)) for v in vars(clone)]", "def clone(self):\n return self.__class__(self.name, *self)", "def clone(self, *args):\n return _osgAnimation.Bone_clone(self, *args)", "def clone(self):\n return _libsbml.CompModelPlugin_clone(self)", "def clone(self):\n raise NotImplementedError", "def clone(self):\r\n cp = self.__class__(self.op, self.inputs, [output.clone() for output in self.outputs])\r\n cp.tag = copy(self.tag)\r\n return cp", "def clone(self):\n\n clone = self.__class__.__new__(self.__class__)\n clone._graph_state = self._graph_state\n clone._molecule_state = self._molecule_state\n return clone", "def clone(self):\n return _libsbml.ListOfModelDefinitions_clone(self)", "def clone(self, **kwargs):\n new_inst = MetaTensor(self.as_tensor().clone(**kwargs))\n new_inst.__dict__ = deepcopy(self.__dict__)\n return new_inst", "def clone( self ):\n new = copy( self )\n try: del new.input_changed\n except AttributeError: pass\n return new", "def copy_model_state(model):\n model_state = deepcopy(model.state_dict())\n return model_state", "def clones(module, num_copies):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])", "def copy_model(dst_model, src_model, const=0.0):\n \n params_dst = dst_model.named_parameters()\n params_src = src_model.named_parameters()\n dict_params_dst = dict(params_dst)\n with torch.no_grad():\n for name, param in params_src:\n if name in dict_params_dst:\n # NOTE: Must add a dummy float otherwise only setting 'reference' to old param.data\n dict_params_dst[name].set_(param.data + const)", "def 
clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def clone(self, *args, **kwargs):\n new_self = copy.copy(self)\n kwargs = self.get_arguments(args, kwargs, onlykeys=True, onlyused=True)\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for key in kwargs:\n if _map_parameters is not None and key in _map_parameters:\n setattr(new_self, _map_parameters[key], kwargs[key])\n else:\n setattr(new_self, key, kwargs[key])\n return new_self", "def clone(self):\n # make copies of my state\n beta = self.beta\n theta = self.theta.clone()\n sigma = self.sigma.clone()\n likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()\n\n # make one and return it\n return type(self)(beta=beta, theta=theta, likelihoods=likelihoods, sigma=sigma)", "def create_original_model():\n model = Sequential()\n model.add(Embedding(max_features,\n embedding_dims,\n input_length=maxlen))\n model.add(Dropout(0.2))\n model.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(GlobalMaxPooling1D())\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n model.add(Dense(2))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "def clone(self) -> 'State':\n return State(self.sim, state=self.get_state().copy())", "def clone(self):\n return shallow_clone(self)", "def clone(self):\n return _libsbml.ExternalModelDefinition_clone(self)", "def clone(self):", "def clone(self, *args):\n return _SALOMERuntime.InputPyPort_clone(self, *args)", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "def GetClone(self, *args, **kwargs):\n pass", "def clone(self):\n return attr.evolve(self, env=dict(self._env))", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def copy(self, new_name):\n new_model = dill.loads(dill.dumps(self.model))\n model_fn = lambda: new_model\n return self.__class__(new_name, model_fn)", "def clone(self, first_order=None, allow_unused=None, allow_nograd=None):\n if first_order is None:\n first_order = self.first_order\n if allow_unused is None:\n allow_unused = self.allow_unused\n if allow_nograd is None:\n allow_nograd = self.allow_nograd\n return MAMLpp(\n clone_module(self.module),\n lr=self.lr,\n lrs=clone_named_parameters(self.lrs),\n first_order=first_order,\n allow_unused=allow_unused,\n allow_nograd=allow_nograd,\n )", "def clone(self):\n return _libsbml.SBase_clone(self)", "def copy(self) -> \"Pipeline\":\n model = PipelineModel(self._config.as_dict(), vocab=copy.deepcopy(self.vocab))\n config = copy.deepcopy(self._config)\n\n pipeline_copy = Pipeline(model, config)\n pipeline_copy._model.load_state_dict(self._model.state_dict())\n\n return pipeline_copy", "def clone_with_updates(self, **kwargs):\n fields_dict = self.to_dict()\n fields_dict.update(kwargs)\n return BindingPrediction(**fields_dict)", "def clone(self):\n return _libsbml.FbcAnd_clone(self)", 
"def make_non_parallel_copy(model):\n def replace_data_parallel(container):\n for name, module in container.named_children():\n if isinstance(module, nn.DataParallel):\n setattr(container, name, module.module)\n if has_children(module):\n replace_data_parallel(module)\n\n # Make a copy of the model, because we're going to change it\n new_model = deepcopy(model)\n if isinstance(new_model, nn.DataParallel):\n new_model = new_model.module\n replace_data_parallel(new_model)\n\n return new_model", "def new(self, **kwargs):\n return self.__model__(**self._preprocess_params(kwargs))", "def copy(self):\n brain = Brain((self.structure), activation_function=self.activation_function)\n brain.weights = copy.deepcopy(self.weights)\n brain.biases = copy.deepcopy(self.biases)", "def clone(self):\n return self.copy()", "def clone(self):\n return copy.deepcopy(self)", "def make_clone(self, attrs=None, sub_clone=False):\n attrs = attrs or {}\n if not self.pk:\n raise ValidationError(\n \"{}: Instance must be saved before it can be cloned.\".format(\n self.__class__.__name__\n )\n )\n if sub_clone:\n duplicate = self\n duplicate.pk = None\n else:\n duplicate = self._create_copy_of_instance(self)\n\n for name, value in attrs.items():\n setattr(duplicate, name, value)\n\n duplicate.save()\n\n duplicate = self.__duplicate_o2o_fields(duplicate)\n duplicate = self.__duplicate_o2m_fields(duplicate)\n duplicate = self.__duplicate_m2o_fields(duplicate)\n duplicate = self.__duplicate_m2m_fields(duplicate)\n return duplicate", "def clone(self):\n return self", "def clone(self, name, **kwargs):\n obj = copy.deepcopy(self._object.get(name))\n obj.__dict__.update(kwargs)\n return obj", "def clone(self):\n return attr.evolve(self)", "def clone(self):\n return _libsbml.LocalParameter_clone(self)", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def clone(self):\n return _libsbml.ListOfParameters_clone(self)", "def clone(self) -> Mutator:\n raise NotImplementedError", "def copy_state(model):\n copy_dict = OrderedDict()\n state_dict = model.state_dict()\n for k, v in state_dict.items():\n copy_dict[k] = v.cpu() if v.is_cuda else v.clone()\n\n return copy_dict", "def iris():\n n = keras.models.clone_model(iris_model)\n n.compile('adam', 'sparse_categorical_crossentropy')\n return n", "def _CloneOp(op, new_name, new_inputs):\n inputs = list(op.inputs)\n for new_input in new_inputs:\n inputs[new_input[0]] = new_input[1]\n return _OP_CLONER.Clone(op, inputs, new_name)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def __init__(self, model: Model1D):\n super().__init__(model=model)", "def assign_model(original_model: nn.Module, pruned_model: nn.Module, cfg_mask: list) -> nn.Module:\n old_modules = list(original_model.named_modules())\n new_modules = list(pruned_model.named_modules())\n assert len(old_modules) == len(\n new_modules), f\"expected equal module nums, got {len(old_modules)} v.s. 
{len(new_modules)}\"\n\n first_linear = True\n bn_idx = 0 # the index of output bn mask for conv layers\n for i in range(len(old_modules)):\n old_name, old_module = old_modules[i]\n new_name, new_module = new_modules[i]\n\n assert old_name == new_name, f\"Expected same module name, got {old_name} and {new_name}\"\n\n if isinstance(old_module, nn.BatchNorm2d) or isinstance(old_module, nn.BatchNorm1d):\n idx = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx.size == 1:\n idx = np.resize(idx, (1,))\n\n assert new_module.weight.data.shape == old_module.weight.data[idx.tolist()].clone().shape\n new_module.weight.data = old_module.weight.data[idx.tolist()].clone()\n new_module.bias.data = old_module.bias.data[idx.tolist()].clone()\n new_module.running_mean = old_module.running_mean[idx.tolist()].clone()\n new_module.running_var = old_module.running_var[idx.tolist()].clone()\n\n bn_idx += 1\n pass\n elif isinstance(old_module, nn.Conv2d):\n old_conv_weight = old_module.weight.clone()\n if old_module.bias is not None:\n old_conv_bias = old_module.bias.clone()\n else:\n old_conv_bias = None\n\n # prune input dim\n if bn_idx - 1 != -1:\n # -1 is the first layer of conv, do not prune the input dimension\n idx_input = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx - 1].cpu().numpy())))\n if idx_input.size == 1:\n idx_input = np.resize(idx_input, (1,))\n old_conv_weight = old_conv_weight.data[:, idx_input.tolist(), :, :].clone()\n # prune output dim\n idx_output = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx_output.size == 1:\n idx_output = np.resize(idx_output, (1,))\n old_conv_weight = old_conv_weight.data[idx_output.tolist(), :, :, :].clone()\n if old_conv_bias:\n old_conv_bias = old_conv_bias.data[idx_output.tolist()].clone()\n\n assert old_conv_weight.shape == new_module.weight.shape, f\"Expected same shape to assign, got {old_conv_weight.shape} and {new_module.weight.shape}\"\n if old_conv_bias:\n assert old_conv_bias.shape == new_module.bias.shape, f\"Expected same shape to assigin, got {old_conv_bias.shape} and {new_module.bias.shape}\"\n\n new_module.weight.data = old_conv_weight.clone()\n if old_conv_bias:\n new_module.bias.data = old_conv_bias.clone()\n\n elif isinstance(old_module, nn.Linear):\n old_linear_weight = old_module.weight.clone()\n old_linear_bias = old_module.bias.clone()\n\n # prune the input dimension\n idx_input = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx - 1].cpu().numpy())))\n if idx_input.size == 1:\n idx_input = np.resize(idx_input, (1,))\n # if first_linear:\n # def gen_list(offset):\n # base_list = np.arange(7 * 7)\n # return base_list + offset * 49\n #\n # idx_input = [gen_list(x) for x in idx_input]\n # idx_input = np.concatenate(idx_input)\n # idx_input = np.sort(idx_input)\n # first_linear = False\n old_linear_weight = old_linear_weight.data[:, idx_input.tolist()].clone()\n\n # prune output layer\n if bn_idx == len(cfg_mask):\n # do not prune the output layer\n idx_output = np.arange(old_linear_weight.shape[0])\n else:\n # prune output dim\n idx_output = np.squeeze(np.argwhere(np.asarray(cfg_mask[bn_idx].cpu().numpy())))\n if idx_output.size == 1:\n idx_output = np.resize(idx_output, (1,))\n old_linear_weight = old_linear_weight.data[idx_output.tolist(), :].clone()\n old_linear_bias = old_linear_bias.data[idx_output.tolist()].clone()\n\n assert old_linear_weight.shape == new_module.weight.shape, f\"Expected same shape to assign, got {old_conv_weight.shape} and {new_module.weight.shape}\"\n assert 
old_linear_bias.shape == new_module.bias.shape, f\"Expected same shape to assigin, got {old_conv_bias.shape} and {new_module.bias.shape}\"\n\n new_module.weight.data = old_linear_weight.clone()\n new_module.bias.data = old_linear_bias.clone()\n\n return pruned_model", "def copy_model_parameters(sess, net1, net2):\n\n copy_scope_parameters(sess, net1.scope, net2.scope)", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])", "def clone(self):\n return _libsbml.LayoutModelPlugin_clone(self)", "def clone(*args, **kwds) -> None:\n command = command_clone(*args, **kwds)\n io.communicate(command)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)" ]
[ "0.7209398", "0.70578766", "0.6805522", "0.6779844", "0.6744178", "0.6651548", "0.66036665", "0.64693105", "0.6441572", "0.64124787", "0.6412031", "0.64043045", "0.63693136", "0.636181", "0.6343172", "0.6320473", "0.6242863", "0.6219034", "0.61284405", "0.6126675", "0.60146576", "0.5943569", "0.59153146", "0.5906954", "0.5825736", "0.58104175", "0.5756897", "0.5731018", "0.5728004", "0.5720808", "0.57140714", "0.5700848", "0.5675338", "0.5670547", "0.56613505", "0.5658423", "0.56533325", "0.56425667", "0.56396466", "0.56156033", "0.56146926", "0.5610398", "0.5605709", "0.55801326", "0.55781823", "0.55674547", "0.5563414", "0.5540239", "0.55253285", "0.5508469", "0.5492522", "0.5479536", "0.546986", "0.54598784", "0.54587895", "0.54587895", "0.54587895", "0.54587895", "0.5441807", "0.5441463", "0.54229814", "0.5422822", "0.5419902", "0.540761", "0.5407237", "0.54014045", "0.539993", "0.5389636", "0.5380992", "0.5378939", "0.5377364", "0.5372998", "0.53590024", "0.5351966", "0.534988", "0.53440386", "0.53424644", "0.53350693", "0.5333211", "0.53329265", "0.5323762", "0.53019273", "0.5291775", "0.5279974", "0.5275882", "0.5275882", "0.5275882", "0.5275882", "0.5275882", "0.52731663", "0.5270261", "0.5269657", "0.5269657", "0.5269657", "0.5266508", "0.52609974", "0.52514", "0.52514", "0.52514", "0.52514" ]
0.7679161
0
Initialize the joystick components
def init(self):
    pygame.init()
    pygame.joystick.init()
    self.controller = pygame.joystick.Joystick(0)
    self.controller.init()
    self.x=0
    self.y=0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.isMoving = 0#0 is stop, 1 is moving forward, -1 is moving backward\n self.isRoutating = False\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)", "def __init__(self, setup=False):\n\n # Initilise pygame and joystick\n pygame.init()\n pygame.joystick.init()\n\n # Number of joysticks available\n js_count = pygame.joystick.get_count()\n \n try:\n # Return first joystick object if available, False if not\n self.js = pygame.joystick.Joystick(0)\n self.js.init()\n\n # Setup mode for finding control indices\n if setup == True:\n print \"In setup mode\"\n\n self.num_buttons = self.js.get_numbuttons()\n self.num_axes = self.js.get_numaxes()\n self.num_hats = self.js.get_numhats()\n\n print \"No. buttons: {}\".format(self.num_buttons)\n print \"No. axes: {}\".format(self.num_axes)\n print \"No. hats: {}\".format(self.num_hats)\n\n # Assign controls from joystick name\n if self.js.get_name() == \"PG-9037\" and setup == False:\n print \"Controller detected: PG-9037\"\n self.button_list, self.axis_list, self.hat_list = self.gamepad_default()\n elif setup == False:\n print \"Unfamiliar controller: Using defaults\"\n self.button_list, self.axis_list, self.hat_list = self.gamepad_default()\n\n except Exception, error:\n print \"No controllers detected\"", "def __init__(self, name='demo'):\n init()\n joystick.init()\n for i in range(joystick.get_count()):\n joystick.Joystick(i).init()\n\n State.game = util.load_cfg(name)\n State.clock = Clock(10, State.game['frame_rate'])\n State.window = display.set_mode(State.game['screen_size'])\n\n self._last_joystick_action = None\n self.create_screens()", "def __init__(self, joystick_ID):\n self.isReady = False\n self._jsID = joystick_ID\n pygame.init()\n pygame.joystick.init()\n n = pygame.joystick.get_count()\n if joystick_ID >= 0 and joystick_ID < n:\n # Joystick with that ID was found, initialize it\n self._JS = pygame.joystick.Joystick(joystick_ID)\n self._JS.init()\n\n # Create controller elements\n self.StickL = Stick(self._JS, [AXS_LX, AXS_LY])\n self.StickR = Stick(self._JS, [AXS_RX, AXS_RY])\n\n self.BtnL = Button(self._JS, BTN_LB_ID)\n self.BtnR = Button(self._JS, BTN_RB_ID)\n self.BtnBack = Button(self._JS, BTN_BACK_ID)\n self.BtnStart = Button(self._JS, BTN_START_ID)\n self.BtnA = Button(self._JS, BTN_A_ID)\n self.BtnB = Button(self._JS, BTN_B_ID)\n self.BtnX = Button(self._JS, BTN_X_ID)\n self.BtnY = Button(self._JS, BTN_Y_ID)\n self.BtnStickL = Button(self._JS, BTN_STICK_L_ID)\n self.BtnStickR = Button(self._JS, BTN_STICK_R_ID)\n\n self.HatL = Hat(self._JS, 0)\n self.isReady = True", "def joy_init():\n\n pygame.init();\n pygame.joystick.init();\n if pygame.joystick.get_count() == 0:\n raise Exception(\"joy_init: No joysticks connected\");\n joystick = pygame.joystick.Joystick(0)\n joystick.init()\n \n control.tare()\n \n return joystick", "def init(self):\n\n pygame.init()\n pygame.display.set_mode((640, 480))\n pygame.display.set_caption(\"Gears 4 Geeks\")\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.ser = serial.Serial('COM4', 9600)\n\n #ADAFRUIT_IO_KEY = 'd1a1bd3737714fa488e0364c775a4b4d' ##This 
will only be good until the end of the competition\n #self.aio = Client(ADAFRUIT_IO_KEY)", "def __init__(self, joystick):\n\t\tself.js = joystick", "def pyga_joysetup(self):\n jcount=0\n if PYG:\n self.dbgprint(\"pygame starts\")\n jcount=PYG.joystick.get_count()\n if jcount > 0:\n for x in range(jcount):\n j = PYG.joystick.Joystick(x)\n j.init()\n self.dbgprint(\">>>Enabled joystick: %s\" % j.get_name())\n taskMgr.add(self.pyga_joytask, 'tsk_pygajoy')\n else:\n self.dbgprint(\"No Joysticks to Initialize!\")\n\n return jcount", "def init_pygame(self):\n # Startup the pygame system\n pygame.init()\n # Create our window\n self.screen = pygame.display.set_mode((Settings.width, Settings.height))\n # Set the title that will display at the top of the window.\n pygame.display.set_caption(self.title)\n # Create the clock\n self.clock = pygame.time.Clock()\n self.last_checked_time = pygame.time.get_ticks()\n # Startup the joystick system\n pygame.joystick.init()\n # For each joystick we find, initialize the stick\n for i in range(pygame.joystick.get_count()):\n pygame.joystick.Joystick(i).init()\n # Set the repeat delay for key presses\n pygame.key.set_repeat(Settings.key_repeat)\n # Create statistics font\n self.statistics_font = pygame.font.Font(None,30)", "def __init__(self, s_width, s_height, setup):\n pygame.init()\n pygame.font.init()\n\n self.arcade = False\n fullscreen = False\n for opt in setup:\n if opt == Setup.Arcade:\n self.arcade = True\n elif opt == Setup.Fullscreen:\n fullscreen = True\n \n self.joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]\n for j in self.joysticks:\n j.init()\n\n self.display = Display((s_width, s_height), fullscreen)\n self.clock = pygame.time.Clock()\n self.FPS = 60\n\n self.ui = UI(self. display)\n if self.arcade:\n if len(self.joysticks) == 0: \n print(\"=================== plug in the controller ===================\") \n exit(1)\n self.ui.enable_arcade_mode()\n \n self.selected_speed = \"speed Medium\"\n self.game_manager = GameManager(self.display, self.ui, GameMode.EatToGrow, GameState.Menu)", "def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. 
If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }", "def init():\n\n global leftDriverStick\n global rightDriverStick\n global goGamePad\n\n try:\n leftDriverStick = T16000M(0)\n except:\n print('OI: Error - Could not instantiate Left Driver Stick on USB port 0!!!')\n\n try:\n rightDriverStick = T16000M(1)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 0!!!')\n\n try:\n goGamePad = Joystick(2)\n except:\n print('OI: Error - Could not instantiate Right Driver Stick on USB port 2!!!')\n\n\n # ----------------------------------------------------------\n # Driver Controls\n # ----------------------------------------------------------\n #global resetYawBtn\n #resetYawBtn = JoystickButton(rightDriverStick, config.btnResetYawAngleIndex)\n #resetYawBtn.whenPressed(NavxResetYawAngle())\n\n global btnDriveSlow\n btnDriveSlow = JoystickButton(leftDriverStick, config.btnDriveSlow)\n \n global btnEnableLightSensor\n btnEnableLightSensor = JoystickButton(leftDriverStick, config.btnEnableLightSensorIndex)\n\n global btnExtendAll\n btnExtendAll = JoystickButton(rightDriverStick, config.btnExtendAllIndex)\n btnExtendAll.whenPressed(ExtendAll())\n\n global btnRetract\n btnRetract = JoystickButton(rightDriverStick, config.btnRetractAllIndex)\n btnRetract.whenPressed(RetractAll())\n\n global btnExtendFront\n btnExtendFront = JoystickButton(rightDriverStick, config.btnExtendFrontIndex)\n btnExtendFront.whenPressed(ExtendFront())\n\n global btnExtendBack\n btnExtendBack = JoystickButton(rightDriverStick, config.btnExtendBackIndex)\n btnExtendBack.whenPressed(ExtendBack())\n\n global btnRetractFront\n btnRetractFront = JoystickButton(rightDriverStick, config.btnRetractFrontIndex)\n 
btnRetractFront.whenPressed(RetractFront())\n\n global btnCargoGrabTog\n btnCargoGrabTog = JoystickButton(goGamePad, config.btnHatchGrabTogIndex)\n btnCargoGrabTog.whenPressed(ExtendBack())\n \n \"\"\"\n global btnResetEncoders\n btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n btnResetEncoders.whenPressed(TankDriveResetEncoders())\n \"\"\"\n\n \"\"\"\n global axisElevator\n axisElevator = JoystickAxis(goGamePad, config.axisElevatorIndex)\n axisElevator. #??? idk how to configure joystick axis\n \"\"\"\n\n \"\"\"\n global btnRampTog\n btnRampTog = JoystickButton(goGamePad, config.btnRampTogIndex)\n btnRampTog.whenPressed(ExtendFront())\n \"\"\"\n #global btnResetEncoders\n #btnResetEncoders = JoystickButton(leftDriverStick, config.btnResetEncodersIndex)\n #btnResetEncoders.whenPressed(TankDriveResetEncoders())\n\n # These variable names are inconsistent, need to be fixed!!!!\n #global btnRampExtendTog\n #btnRampExtendTog = JoystickButton(goGamePad, config.btnRampExtendTogIndex)\n #btnRampExtendTog.whenPressed(RampExtend())\n\n #global btnRampRetractTog\n #btnRampRetractTog = JoystickButton(goGamePad, config.btnRampRetractTogIndex)\n #btnRampRetractTog.whenPressed(RampRetract())", "def __init__(self):\n super().__init__()\n\n self._registry = {}\n el = gremlin.event_handler.EventListener()\n el.joystick_event.connect(self._joystick_cb)", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def on_update(self, delta_time):\n #pressed = self.window.joys[0].on_joybutton_press \n #print(pressed) # <bound method Joystick.on_joybutton_press of <pyglet.input.base.Joystick object at 0x7f5169264d90>>\n\n #print(type(pressed)) # <class 'method'>\n\n joy_dico = self.window.joys[0]\n\n\n\n btns = joy_dico.buttons\n print(btns)\n #print(type(btns)) # list\n\n print(\">>>>\")\n\n\n print(joy_dico.button_controls) # [Button(raw_name=BTN_A), Button(raw_name=BTN_B), Button(raw_name=BTN_X), Button(raw_name=BTN_Y), Button(raw_name=BTN_TL), Button(raw_name=BTN_TR), Button(raw_name=BTN_SELECT), Button(raw_name=BTN_START), Button(raw_name=BTN_MODE), Button(raw_name=BTN_THUMBL), Button(raw_name=BTN_THUMBR)]\n\n print(joy_dico.button_controls[0].__dict__)\n\n print(\"_______*******\")\n #print(joy_dico.button_controls.BTN_A)\n\n joy_dico = self.window.joys[0]\n\n BTN_A = joy_dico.button_controls[0]\n BTN_B = joy_dico.button_controls[1]\n BTN_X = joy_dico.button_controls[2]\n BTN_Y = joy_dico.button_controls[3]\n BTN_TL = joy_dico.button_controls[4]\n BTN_TR = joy_dico.button_controls[5]\n BTN_SELECT = joy_dico.button_controls[6]\n BTN_START = joy_dico.button_controls[7]\n BTN_MODE = joy_dico.button_controls[8]\n BTN_THUMBL = joy_dico.button_controls[9]\n BTN_THUMBR = joy_dico.button_controls[10]\n\n\n print(f\"\\n BTN_A ----> {BTN_A}\")\n\n\n BTN_list = [BTN_A,BTN_B,BTN_X,BTN_Y, BTN_TL, BTN_TR, BTN_SELECT, BTN_START, BTN_MODE, BTN_THUMBL, BTN_THUMBR]\n\n BTN_fn_list = [self.joy_A, self.joy_B, self.joy_X, self.joy_Y]\n\n for BTN in BTN_list:\n if BTN._value == 1:\n print(f\"=====> >=====> ====> {BTN.raw_name}\")\n\n idx = BTN_list.index(BTN)\n\n BTN_fn_list[idx]()", "def init(self, address, port):\n \n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n 
self.event_dict = {}\n\n # Create a TCP/IP socket\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (address, port)\n print('connecting to {} port {}'.format(address, port))\n self.sock.connect(server_address)\n self.axis_data = {i:0 for i in range(7)}\n self.verbose = True", "def __init__(self):\n self.inches_moved = 0\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.running = True\n self.ir_sensor = ev3.InfraredSensor()\n self.color_sensor = ev3.ColorSensor()\n assert self.color_sensor\n assert self.ir_sensor\n assert self.touch_sensor\n self.arm_motor.position = 0\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.pixy\n\n self.right_motor_encoder = self.right_motor.position\n self.left_motor_encoder = self.left_motor.position", "def __init__(self, robot):\n\n #initialise the stick and the smart dashboard (in case we need stuff for auton):\n self.stick = wpilib.Joystick(0)\n self.smart_dashboard = NetworkTable.getTable(\"SmartDashboard\")\n\n #Main stick buttons.\n #-----------------------------------------------------------------------\n trigger = JoystickButton(self.stick, 1)\n thumb = JoystickButton(self.stick, 2)\n three = JoystickButton(self.stick, 3)\n four = JoystickButton(self.stick, 4)\n five = JoystickButton(self.stick, 5)\n six = JoystickButton(self.stick, 6)\n seven = JoystickButton(self.stick, 7)\n eight = JoystickButton(self.stick, 8)\n nine = JoystickButton(self.stick, 9)\n ten = JoystickButton(self.stick, 10)\n eleven = JoystickButton(self.stick, 11)\n twelve = JoystickButton(self.stick, 12)\n\n #Hat switch POV stuff.\n #-----------------------------------------------------------------------\n pov_north = POVButton(self.stick, 0)\n pov_northeast = POVButton(self.stick, 45)\n pov_east = POVButton(self.stick, 90)\n pov_southeast = POVButton(self.stick, 135)\n pov_south = POVButton(self.stick, 180)\n pov_southwest = POVButton(self.stick, 225)\n pov_west = POVButton(self.stick, 270)\n pov_northwest = POVButton(self.stick, 315)\n\n pov_south.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kBack))\n pov_north.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kForward))\n pov_east.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kRight))\n pov_west.whenPressed(SuperStrafeEntertainmentSystem(robot, SuperStrafeEntertainmentSystem.kLeft))", "def inp():\n return joystick", "def input(self):\n\n self.vx, self.vy = 0, 0\n\n game = self.game\n\n x_axis = game.get_axis(0)\n if abs(x_axis) < JOYSTICK_THRESHOLD:\n x_axis = 0\n y_axis = game.get_axis(1)\n if abs(y_axis) < JOYSTICK_THRESHOLD:\n y_axis = 0\n\n if game.get_vbutton_down('left'):\n x_axis = -1\n elif game.get_vbutton_down('right'):\n x_axis = 1\n if game.get_vbutton_down('up'):\n y_axis = -1\n elif game.get_vbutton_down('down'):\n y_axis = 1\n elif game.get_vbutton_down('top_left'):\n x_axis = -1\n y_axis = -1\n elif game.get_vbutton_down('top_right'):\n x_axis = 1\n y_axis = -1\n elif game.get_vbutton_down('bottom_left'):\n x_axis = -1\n y_axis = 1\n elif game.get_vbutton_down('bottom_right'):\n x_axis = 1\n y_axis = 1\n\n # Check for collisions\n if self.get_obstacles(self.spd * x_axis, 0):\n x_axis = 0\n if self.get_obstacles(0, self.spd * y_axis):\n y_axis = 0\n\n self.vx 
= self.spd * x_axis\n self.vy = self.spd * y_axis\n\n if y_axis != 0:\n self.last_movey = y_axis\n self.last_movex = 0\n elif x_axis != 0:\n self.last_movex = x_axis\n self.last_movey = 0\n\n # diagonals\n if self.vx != 0 and self.vy != 0:\n self.vx *= 0.707\n self.vy *= 0.707\n\n if game.get_vbutton_jp('drop') or game.get_joystick_jp(J_BUTTONS['X']):\n self.drop_item()\n elif game.get_vbutton_jp('pickup') or game.get_joystick_jp(J_BUTTONS['A']):\n self.pickup_items()\n\n return self.is_moving()", "def antenny_init_components(self):\n if self.antenny_config is None:\n print(\"Please load a config before initializing components\")\n if not self.antenny_config.check():\n print(\"Config {} is not valid, failed to initialize\".format(self.antenny_config.get_name()))\n print(\"If you believe this is an error, or you have modified the base components of the antenny board, \"\n \"please check Config class as well as the default configs for more details.\")\n\n self.imu_init()\n self.pwm_controller_init()\n self.elevation_servo_init()\n self.azimuth_servo_init()\n self.screen_init()\n self.gps_init()\n self.telemetry_init()\n self.platform_init()", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def joy_callback(self, joy):\n self.inputs[JoyInput.A] = joy.buttons[0]\n self.inputs[JoyInput.B] = joy.buttons[1]\n self.inputs[JoyInput.X] = joy.buttons[2]\n self.inputs[JoyInput.Y] = joy.buttons[3]\n self.inputs[JoyInput.LB] = joy.buttons[4]\n self.inputs[JoyInput.RB] = joy.buttons[5]\n self.inputs[JoyInput.BACK] = joy.buttons[6]\n self.inputs[JoyInput.START] = joy.buttons[7]\n self.inputs[JoyInput.POWER] = joy.buttons[8]\n self.inputs[JoyInput.LS] = joy.buttons[9]\n self.inputs[JoyInput.RS] = joy.buttons[10]\n\n self.inputs[JoyInput.LS_X] = joy.axes[0]\n self.inputs[JoyInput.LS_Y] = joy.axes[1]\n self.inputs[JoyInput.LT] = joy.axes[2]\n self.inputs[JoyInput.RS_X] = joy.axes[3]\n self.inputs[JoyInput.RS_Y] = joy.axes[4]\n self.inputs[JoyInput.RT] = joy.axes[5]\n self.inputs[JoyInput.CROSS_X] = joy.axes[6]\n self.inputs[JoyInput.CROSS_Y] = joy.axes[7]", "def __init__(self):\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.MAX_SPEED = 900\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def on_init(self):\n self.controller = gameController.Controller()", "def map_joystick(joystick):\n left = baxter_interface.Limb('left')\n right = baxter_interface.Limb('right')\n grip_left = baxter_interface.Gripper('left')\n grip_right = baxter_interface.Gripper('right')\n lcmd = {}\n rcmd = {}\n\n #available joints\n lj = left.joint_names()\n rj = right.joint_names()\n\n #abbreviations\n jhi = lambda s: joystick.stick_value(s) > 0\n jlo = lambda s: joystick.stick_value(s) < 0\n bdn = joystick.button_down\n 
bup = joystick.button_up\n\n def print_help(bindings_list):\n print(\"press any keyboard key to quit.\")\n for bindings in bindings_list:\n for (test, cmd, doc) in bindings:\n if callable(doc):\n doc = doc()\n print(\"%s: %s\" % (str(test[1][0]), doc))\n\n bindings_list = []\n bindings = (\n ((bdn, ['rightTrigger']), (grip_left.close, []), \"left gripper close\"),\n ((bup, ['rightTrigger']), (grip_left.open, []), \"left gripper open\"),\n ((bdn, ['leftTrigger']), (grip_right.close, []), \"right gripper close\"),\n ((bup, ['leftTrigger']), (grip_right.open, []), \"right gripper open\"),\n ((jlo, ['leftStickHorz']), (set_j, [rcmd, right, rj, 0, 0.1]), lambda i=0:\"right inc \"+rj[i]),\n ((jhi, ['leftStickHorz']), (set_j, [rcmd, right, rj, 0, -0.1]), lambda i=0:\"right dec \"+rj[i]),\n ((jlo, ['rightStickHorz']), (set_j, [lcmd, left, lj, 0, 0.1]), lambda i=0:\"left inc \"+lj[i]),\n ((jhi, ['rightStickHorz']), (set_j, [lcmd, left, lj, 0, -0.1]), lambda i=0:\"left dec \"+lj[i]),\n ((jlo, ['leftStickVert']), (set_j, [rcmd, right, rj, 1, 0.1]), lambda i=1:\"right inc \"+rj[i]),\n ((jhi, ['leftStickVert']), (set_j, [rcmd, right, rj, 1, -0.1]), lambda i=1:\"right dec \"+rj[i]),\n ((jlo, ['rightStickVert']), (set_j, [lcmd, left, lj, 1, 0.1]), lambda i=1:\"left inc \"+lj[i]),\n ((jhi, ['rightStickVert']), (set_j, [lcmd, left, lj, 1, -0.1]), lambda i=1:\"left dec \"+lj[i]),\n ((bdn, ['rightBumper']), (rotate, [lj]), \"left: cycle joint\"),\n ((bdn, ['leftBumper']), (rotate, [rj]), \"right: cycle joint\"),\n ((bdn, ['btnRight']), (grip_left.calibrate, []), \"left calibrate\"),\n ((bdn, ['btnLeft']), (grip_right.calibrate, []), \"right calibrate\"),\n ((bdn, ['function1']), (print_help, [bindings_list]), \"help\"),\n ((bdn, ['function2']), (print_help, [bindings_list]), \"help\"),\n )\n bindings_list.append(bindings)\n\n rate = rospy.Rate(100)\n print_help(bindings_list)\n print(\"press any key to stop. 
\")\n while not rospy.is_shutdown():\n c = iodevices.getch()\n if c:\n if c == '?':\n print_help(bindings_list)\n else:\n return True\n for (test, cmd, doc) in bindings:\n if test[0](*test[1]):\n cmd[0](*cmd[1])\n if callable(doc):\n print(doc())\n else:\n print(doc)\n if len(lcmd):\n left.set_joint_positions(lcmd)\n lcmd.clear()\n if len(rcmd):\n right.set_joint_positions(rcmd)\n rcmd.clear()\n rate.sleep()\n return False", "def joystickController(self):\n return self.__joystickController", "def create_device(self, layout):\n events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],\n ecodes.EV_REL: []}\n\n # Joystick device\n if layout.axes or layout.buttons or layout.hats:\n self.joystick_dev = next_joystick_device()\n\n for name in layout.axes:\n params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.hats:\n params = (0, -1, 1, 0, 0)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.buttons:\n events[ecodes.EV_KEY].append(name)\n\n if layout.mouse:\n self.mouse_pos = {}\n self.mouse_rel = {}\n self.mouse_analog_sensitivity = float(\n layout.mouse_options.get(\"MOUSE_SENSITIVITY\",\n DEFAULT_MOUSE_SENSITIVTY)\n )\n self.mouse_analog_deadzone = int(\n layout.mouse_options.get(\"MOUSE_DEADZONE\",\n DEFAULT_MOUSE_DEADZONE)\n )\n self.scroll_repeat_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_REPEAT_DELAY\",\n DEFAULT_SCROLL_REPEAT_DELAY)\n )\n self.scroll_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_DELAY\",\n DEFAULT_SCROLL_DELAY)\n )\n\n for name in layout.mouse:\n if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):\n if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:\n # This ensures that scroll wheel events can work\n events[ecodes.EV_REL].append(ecodes.REL_WHEEL)\n else:\n events[ecodes.EV_REL].append(name)\n self.mouse_rel[name] = 0.0\n\n self.device = UInput(name=layout.name, events=events,\n bustype=layout.bustype, vendor=layout.vendor,\n product=layout.product, version=layout.version)\n self.layout = layout", "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def listen(self):\n\n if not self.key_data:\n self.key_data = {}\n for i in range(1024):\n self.key_data[i] = False\n\n if not self.axis_data:\n self.axis_data = {}\n for i in range(self.controller.get_numaxes()):\n self.axis_data[i] = 0.0\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n debug_toggle = True\n print_state_toggle = True\n\n # These parameters define how frequesnt speed setting sent over serial to arduino\n speed_threshold = 10.0 # sets update threshold\n speed_step = 1 # sets acceleration\n speed_delay = 0.01 # delay per 1 step in sec\n\n mode_switch = \"j\" # control mode: k - keyboard, j - joystick\n\n # Parameters for keyboard control mode\n speed = 0.0\n speed_current = 0\n direction = \"r\" # r - release, f - forward, b - backward\n direction_current = \"r\"\n\n # Parameters for joystick control mode\n speed_l = 0\n speed_r = 0\n prev_speed_l = 0\n prev_speed_r = 0\n prev_btn = False\n\n while True:\n prev = self.axis_data\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.key_data[event.key] = True\n elif 
event.type == pygame.KEYUP:\n self.key_data[event.key] = False\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # check for exit command\n if self.button_data[9] or self.key_data[pygame.QUIT] or self.key_data[pygame.K_ESCAPE]:\n pygame.quit()\n break\n\n # toggle debug\n if self.key_data[pygame.K_d]:\n if debug_toggle:\n print(\"Toggle debug\")\n self.ser.write(b'd')\n debug_toggle = False\n else:\n debug_toggle = True\n\n # print out motors status\n if self.key_data[pygame.K_p]:\n if print_state_toggle:\n self.ser.write(b'p')\n if self.ser.in_waiting:\n print (self.ser.readline())\n print_state_toggle = False\n else:\n print_state_toggle = True\n\n if self.key_data[pygame.K_1] and mode_switch != \"k\":\n mode_switch = \"k\"\n\n if self.key_data[pygame.K_2] and mode_switch != \"j\":\n print(\"Joystick mode: ON\")\n mode_switch = \"j\"\n\n if mode_switch == \"k\": # keyboard control mode\n # accelearte forward\n if self.key_data[pygame.K_a] and direction != \"r\":\n if speed < 255.0:\n speed = speed + speed_step\n sleep(speed_delay)\n # accelerate backward\n if self.key_data[pygame.K_z] and direction != \"r\":\n if speed > 0.0:\n speed = speed - speed_step\n sleep(speed_delay)\n\n if self.key_data[pygame.K_UP] and direction != \"f\":\n direction = \"f\"\n if self.key_data[pygame.K_DOWN] and direction != \"b\":\n direction = \"b\"\n if self.key_data[pygame.K_UP] == False and direction == \"f\":\n direction = \"r\"\n if self.key_data[pygame.K_DOWN] == False and direction == \"b\":\n direction = \"r\"\n\n if math.fabs(speed - speed_current) > speed_threshold or direction != direction_current:\n # print(\"{0}, {1}, {2}, {3}\".format(speed, speed_current, direction, direction_current))\n direction_current = direction\n if direction == \"r\":\n speed = 0.0\n speed_current = int(speed)\n str_r = \"sr\" + direction_current + str(speed_current) + \"e\"\n str_l = \"sl\" + direction_current + str(speed_current) + \"e\"\n print(str_l)\n print(str_r)\n self.ser.write(str_r.encode())\n self.ser.write(str_l.encode())\n\n if(self.key_data[pygame.K_LEFT]):\n str_rf = \"srf\" + str(speed_current) + \"e\"\n self.ser.write(str_rf.encode())\n str_lf = \"slf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_lf.encode())\n elif(self.key_data[pygame.K_RIGHT]):\n str_rb = \"srf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_rb.encode())\n str_lb = \"slf\" + str(speed_current) + \"e\"\n self.ser.write(str_lb.encode())\n\n if (self.key_data[pygame.K_UP] == False and self.key_data[pygame.K_DOWN] == False) and (self.key_data[pygame.K_a] == False and self.key_data[pygame.K_z] == False):\n speed = 0\n speed_current = speed\n direction = \"r\"\n direction_current = direction\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n if mode_switch == \"j\": # joystick control mode\n if self.ser.in_waiting:\n data = str(self.ser.readline().strip())\n data = data[2 :len(data)-1]\n print(data)\n #self.aio.send('Team Hacky Slackers', data)\n\n prev_speed_l = speed_l\n prev_speed_r = speed_r\n speed_threshold = 1\n\n #simplified linear mapping for controller\n speed_l = int((self.axis_data[0]*(-50)) + 90)\n speed_r = int(math.fabs(self.axis_data[3]*255))\n #print(self.axis_data)\n #print(\"curr_l: {0}, perv_l: {1}, 
curr_r:{2}, perv_r:{3}\".format(speed_l, prev_speed_l, speed_r,prev_speed_r))\n\n if self.axis_data[0] < -0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lf = \"slf\" + str(speed_l) + \"e\"\n self.ser.write(str_lf.encode())\n elif self.axis_data[0] > 0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lb = \"slb\" + str(speed_l) + \"e\"\n self.ser.write(str_lb.encode())\n\n\n if self.axis_data[3] < -0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rf = \"srf\" + str(speed_r) + \"e\"\n self.ser.write(str_rf.encode())\n elif self.axis_data[3] > 0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rb = \"srb\" + str(speed_r) + \"e\"\n self.ser.write(str_rb.encode())\n\n if ( self.axis_data[0] >= -0.05 and self.axis_data[0] <= 0.05 ) and ( self.axis_data[3] >= -0.05 and self.axis_data[3] <= 0.05 ):\n speed_l = 90\n speed_r = 0\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n #Logic to call RFID scan only once per click of R1 button\n # if(prev_btn != self.button_data[5]):\n # prev_btn = self.button_data[5]\n # if self.button_data[5] :\n # print(\"Scanning for RFID Card\")\n # self.ser.write(\"i\".encode())\n\n # clear()\n # pprint.pprint(self.button_data)\n # pprint.pprint(self.axis_data)\n # pprint.pprint(self.hat_data)", "def __init__(self):\n\n self.left_motor = ev3.LargeMotor(ev3.OUTPUT_B)\n self.right_motor = ev3.LargeMotor(ev3.OUTPUT_C)\n self.arm_motor = ev3.MediumMotor(ev3.OUTPUT_A)\n self.touch_sensor = ev3.TouchSensor()\n self.color_sensor = ev3.ColorSensor()\n self.ir_sensor = ev3.InfraredSensor()\n self.pixy = ev3.Sensor(driver_name=\"pixy-lego\")\n\n assert self.left_motor.connected\n assert self.right_motor.connected\n assert self.arm_motor.connected\n assert self.touch_sensor\n assert self.color_sensor\n assert self.ir_sensor\n assert self.pixy", "def robotInit(self):\n\n #Initialize Networktables\n self.sd = NetworkTables.getTable('SmartDashboard')\n\n \n #Set up motors to drive robot\n self.M2 = wpilib.VictorSP(2)\n self.M3 = wpilib.VictorSP(3)\n #self.M2.setInverted(True)\n #self.M3.setInverted(True)\n self.left = wpilib.SpeedControllerGroup(self.M2,self.M3)\n \n self.M0 = wpilib.VictorSP(0)\n self.M1 = wpilib.VictorSP(1)\n self.right = wpilib.SpeedControllerGroup(self.M0,self.M1)\n self.drive = wpilib.drive.DifferentialDrive(self.left, self.right)\n \n \n self.stick = wpilib.Joystick(1)\n self.timer = wpilib.Timer()\n #Camera\n wpilib.CameraServer.launch()\n #Servo\n self.SV1 = wpilib.Servo(9)\n self.SV2 = wpilib.Servo(8) \n #Dashboard\n NetworkTables.initialize(server='10.61.62.2')\n #Switches\n self.SW0 = wpilib.DigitalInput(0)\n self.SW1 = wpilib.DigitalInput(1)\n #Elevator\n self.E = wpilib.VictorSP(5)\n self.prepareCubeFlag = 0\n self.grabCubeFlag = 0\n self.deliverCubeFlag = 0\n self.adjustLeftFlag=0\n self.adjustRightFlag=0\n self.driveFlag=0\n #Gyro\n self.gyro = wpilib.ADXRS450_Gyro(0)\n self.gyro.reset()\n #All possible autonomous routines in a sendable chooser\n '''\n self.chooser = wpilib.SendableChooser()\n self.chooser.addDefault(\"None\", '4')\n self.chooser.addObject(\"left-LeftScale\", '1')\n self.chooser.addObject(\"Middle-LeftScale\", '2')\n self.chooser.addObject(\"Right-LeftScale\", '3')\n self.chooser.addObject(\"Left-RightScale\", '5')\n '''\n #wpilib.SmartDashboard.putData('Choice', self.chooser)\n #Encoders\n self.EC1 = wpilib.Encoder(2,3)\n self.EC2 = wpilib.Encoder(4,5)\n self.EC1.reset()\n self.EC2.reset()", "def initialize(self):\n self.Update()\n 
ViewportManager.updateAll()\n self.wxStep()\n ViewportManager.initializeAll()\n # Position the camera\n if base.trackball is not None:\n base.trackball.node().setPos(0, 30, 0)\n base.trackball.node().setHpr(0, 15, 0)\n\n # to make persp view as default\n self.perspViewMenuItem.Check()\n self.onViewChange(None, 3)\n\n # initializing direct\n if self.fStartDirect:\n base.startDirect(fWantTk = 0, fWantWx = 0)\n\n base.direct.disableMouseEvents()\n newMouseEvents = [\"_le_per_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.mouseEvents]\n base.direct.mouseEvents = newMouseEvents\n base.direct.enableMouseEvents()\n\n base.direct.disableKeyEvents()\n keyEvents = [\"_le_per_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.keyEvents]\n base.direct.keyEvents = keyEvents\n base.direct.enableKeyEvents()\n\n base.direct.disableModifierEvents()\n modifierEvents = [\"_le_per_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.modifierEvents]\n base.direct.modifierEvents = modifierEvents\n base.direct.enableModifierEvents()\n\n base.direct.cameraControl.lockRoll = True\n base.direct.setFScaleWidgetByCam(1)\n\n unpickables = [\n \"z-guide\",\n \"y-guide\",\n \"x-guide\",\n \"x-disc-geom\",\n \"x-ring-line\",\n \"x-post-line\",\n \"y-disc-geom\",\n \"y-ring-line\",\n \"y-post-line\",\n \"z-disc-geom\",\n \"z-ring-line\",\n \"z-post-line\",\n \"centerLines\",\n \"majorLines\",\n \"minorLines\",\n \"Sphere\",]\n\n for unpickable in unpickables:\n base.direct.addUnpickable(unpickable)\n\n base.direct.manipulationControl.optionalSkipFlags |= SKIP_UNPICKABLE\n base.direct.manipulationControl.fAllowMarquee = 1\n base.direct.manipulationControl.supportMultiView()\n base.direct.cameraControl.useMayaCamControls = 1\n base.direct.cameraControl.perspCollPlane = self.perspView.collPlane\n base.direct.cameraControl.perspCollPlane2 = self.perspView.collPlane2\n\n for widget in base.direct.manipulationControl.widgetList:\n widget.setBin('gui-popup', 0)\n widget.setDepthTest(0)\n\n # [gjeon] to intercept messages here\n base.direct.ignore('DIRECT-delete')\n base.direct.ignore('DIRECT-select')\n base.direct.ignore('DIRECT-preDeselectAll')\n base.direct.ignore('DIRECT-toggleWidgetVis')\n base.direct.fIgnoreDirectOnlyKeyMap = 1\n\n # [gjeon] do not use the old way of finding current DR\n base.direct.drList.tryToGetCurrentDr = False\n\n else:\n base.direct=None\n #base.closeWindow(base.win)\n base.win = base.winList[3]", "def init_controls(self):\n\n\n controls_keypress_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'a': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'q': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: 
self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land_approach(),\n 'v': lambda: self.toggle_use_voice(),\n 't': lambda: self.toggle_tracking(),\n 'k': lambda: self.toggle_distance_mode(),\n 'm': lambda: self.toogle_manual_control(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n \n \n \n \n \n \n # '0': lambda: self.drone.set_video_encoder_rate(0),\n # '1': lambda: self.drone.set_video_encoder_rate(1),\n # '2': lambda: self.drone.set_video_encoder_rate(2),\n # '3': lambda: self.drone.set_video_encoder_rate(3),\n # '4': lambda: self.drone.set_video_encoder_rate(4),\n # '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'a': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'q': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n controls_keypress_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'q': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'a': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land(),\n 't': lambda: self.toggle_tracking(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n '0': lambda: self.drone.set_video_encoder_rate(0),\n '1': lambda: self.drone.set_video_encoder_rate(1),\n '2': lambda: self.drone.set_video_encoder_rate(2),\n '3': lambda: self.drone.set_video_encoder_rate(3),\n '4': lambda: self.drone.set_video_encoder_rate(4),\n '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'q': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'a': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 
0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n if self.kbd_layout == \"AZERTY\":\n self.controls_keypress = controls_keypress_AZERTY\n self.controls_keyrelease = controls_keyrelease_AZERTY\n else:\n self.controls_keypress = controls_keypress_QWERTY\n self.controls_keyrelease = controls_keyrelease_QWERTY\n self.key_listener = keyboard.Listener(on_press=self.on_press,\n on_release=self.on_release)\n self.key_listener.start()", "def __init__(self):\n super(Pad, self).__init__()\n\n self.oldx, self.oldy = -1, -1\n self.width, self.height = -1, -1\n self.surface, self.cr = None, None\n\n self.add_events(gdk.BUTTON_PRESS_MASK\n | gdk.BUTTON_RELEASE_MASK\n | gdk.POINTER_MOTION_MASK\n | gdk.POINTER_MOTION_HINT_MASK)\n self.connect('button-press-event', self.button_press_cb)\n self.connect('button-release-event', self.button_release_cb)\n self.connect('configure-event', self.configure_cb)\n self.connect('expose-event', self.expose_cb)\n self.connect('motion_notify_event', self.motion_notify_cb)", "def __init__(self):\n self.rows = [18, 23, 24, 25]\n self.cols = [17, 27, 22]\n self.keypad = [\n [\"1\", \"2\", \"3\"],\n [\"4\", \"5\", \"6\"],\n [\"7\", \"8\", \"9\"],\n [\"*\", \"0\", \"#\"]\n ]\n self.setup()", "def initialize_all_gamepads():\n joysticks = []\n for joystick_id in range(pg.joystick.get_count()):\n joysticks.append(pg.joystick.Joystick(joystick_id))\n joysticks[joystick_id].init()\n return joysticks", "def ev_joydeviceadded(self, event: tcod.event.JoystickDevice) -> T | None:", "def main():\n\n # Center positions when joystick is at rest\n center_x_pos = 530\n center_y_pos = 504\n\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup([red_led, green_led, blue_led], GPIO.OUT, initial=GPIO.LOW)\n\n pwm_r = GPIO.PWM(red_led, 300)\n pwm_g = GPIO.PWM(green_led, 300)\n pwm_b = GPIO.PWM(blue_led, 300)\n\n pwm_instances = [pwm_r, pwm_g, pwm_b]\n\n for p in pwm_instances:\n p.start(0)\n\n try:\n while True:\n # If joystick switch is pressed down, turn off LEDs\n switch = read_spi_data_channel(mcp3008_switch_channel)\n if switch == 0:\n for p in pwm_instances:\n p.ChangeDutyCycle(0)\n continue\n\n # Read the joystick position data\n x_pos = read_spi_data_channel(mcp3008_x_voltage_channel)\n y_pos = read_spi_data_channel(mcp3008_y_voltage_channel)\n\n # If joystick is at rest in center, turn on all LEDs at max\n if is_joystick_near_center(x_pos, y_pos, center_x_pos, center_y_pos):\n for p in pwm_instances:\n p.ChangeDutyCycle(100)\n continue\n\n # Adjust duty cycle of LEDs based on joystick position\n angle = convert_coordinates_to_angle(x_pos, y_pos, center_x_pos, center_y_pos)\n pwm_r.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'R'))\n pwm_g.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'G'))\n pwm_b.ChangeDutyCycle(calculate_next_pwm_duty_cycle_for_led(angle, 'B'))\n\n # print(\"Position : ({},{}) -- Angle : {}\".format(x_pos, y_pos, round(angle, 2)))\n\n except KeyboardInterrupt:\n pass\n\n finally:\n for p in pwm_instances:\n p.stop()\n spi.close()\n GPIO.cleanup()", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def __init__(self, **traitsDict):\n print \"Instantiating GUI..\"\n 
super(ADC, self).__init__(**traitsDict)\n self.connection.connect()\n self.oscilloscope = Oscilloscope(connection = self.connection, resolution = self.refreshTime,visibleChannels=self.channelList)\n self.start_timer()", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def __init__(self, front_left_wheel, front_right_wheel,\n\t\t\t\t rear_left_wheel, rear_right_wheel):\n\t\tself._front_left_wheel = front_left_wheel\n\t\tself._front_right_wheel = front_right_wheel\n\t\tself._rear_left_wheel = rear_left_wheel\n\t\tself._rear_right_wheel = rear_right_wheel", "def __init__(self, x1, y1, w, h, dal):\n self._dal = dal\n self._screen_size = Rect(x1, y1, w, h)\n\n self._facade = Facade.facade_layer()\n self._play_button = Rect(426,656,207,58)\n self._quit_button = Rect(686,662,207,58)", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def init(self, x0=None, control=None):\n if x0 is not None:\n self._x = base.getvector(x0, 3)\n else:\n self._x = self._x0.copy()\n\n self._x_hist = []\n\n if self._seed is not None:\n self._random = np.random.default_rng(self._seed)\n\n if control is not None:\n # override control\n self._control = control\n \n if self._control is not None:\n self._control.init()\n\n self._t = 0\n\n # initialize the graphics\n if self._animation is not None:\n\n # setup the plot\n self._ax = base.plotvol2(self.workspace)\n \n self._ax.set_xlabel('x')\n self._ax.set_ylabel('y')\n self._ax.set_aspect('equal')\n self._ax.figure.canvas.manager.set_window_title(\n f\"Robotics Toolbox for Python (Figure {self._ax.figure.number})\")\n\n self._animation.add(ax=self._ax) # add vehicle animation to axis\n self._timer = plt.figtext(0.85, 0.95, '') # display time counter\n\n # initialize the driver\n if isinstance(self._control, VehicleDriver):\n self._control.init(ax=self._ax)", "def __init__(self, pygame, master):\n \n self._pygame = pygame\n self._master = master\n self._display = self._pygame.display\n self._interface = None\n self._state = None\n self._cycle_colour = (200, 0, 0)\n self._white = (255, 255, 255)", "def listen(self):\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n axis=self.axis_data\n\n if 0 in axis:\n 
self.x=axis[0]\n self.y=-axis[1]\n\n # Turbo\n if self.button_data[7]:\n self.x*=2\n self.y*=2\n # Start Camera\n if self.button_data[3]:\n subprocess.Popen([\"firefox\",otraip+\"/html\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return \"camera\"\n\n # Measure\n if self.button_data[1]:\n return \"measure\"\n\n # Exit\n if self.button_data[2]:\n return \"exit\"\n return \"move \"+str(self.x)+\" \"+str(self.y)+\"\\n\"", "def __init__(self, root, io):\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict({\n name: dict(conf)\n for name, conf in self.dxl_motors.items()\n })\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"", "def __init__(self):\n self.game_model = ScrollerModel(1280, 480)\n self.view = ScrollerView(self.game_model, 1280, 480)", "def _initControls(self):\n\n print \"DEBUG: Initializing Controls\"\n Game.Controls[pygame.K_a] = Game.MoveLeft\n Game.Controls[pygame.K_d] = Game.MoveRight\n Game.Controls[pygame.K_w] = Game.Jump\n Game.Controls[pygame.K_s] = Game.Duck\n Game.Controls[pygame.K_SPACE] = Game.Fly\n Game.Controls[pygame.K_j] = Game.Fire\n Game.Controls[pygame.K_ESCAPE] = Game.Quit\n\n Game.BoundControls.append(pygame.K_a)\n Game.BoundControls.append(pygame.K_d)\n Game.BoundControls.append(pygame.K_w)\n Game.BoundControls.append(pygame.K_s)\n Game.BoundControls.append(pygame.K_j)\n Game.BoundControls.append(pygame.K_SPACE)\n Game.BoundControls.append(pygame.K_ESCAPE)", "def __init__(self):\n rospy.init_node('TruckSimNode')\n\n self.steer_angle_topic = rospy.get_param('~steer_angle_topic', \"steer_angle\")\n self.chassis_force_topic = rospy.get_param('~chassis_force_topic', \"chassis_force\")\n\n rospy.Subscriber(\"joy\", Joy, self.joyCb)\n\n self.steer_pub = rospy.Publisher(self.steer_angle_topic, Float64, queue_size=1)\n self.chassis_force_pub = rospy.Publisher(self.chassis_force_topic, Float64, queue_size=1)\n\n # array of joy axes:\n # 0: turn - (+ve = left)\n # 1: acceleration (+ve = increase in current direction)\n # 2: gear\n self.steer = 0\n self.accel = 0\n self.gear = 0\n self.steer_joint = Float64()\n self.chassis_force = Float64()", "def __init__(self):\r\n self.pos = [0, 0] # Position\r\n self.vel = [0, 0] # Velocity\r\n self.acc = [0, 0] # Acceleration\r\n # Begins facing towards top of screen\r\n self.heading = math.pi # Heading in radians\r\n # Default screen size causes no wrapping\r\n self.screen = [0, 0]", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 
640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def init_devices(self, drone, timestep):\n # time\n self.deltaT = timestep / 1000.\n # Drone's Odometry\n # Position coordinates [Y, Z ,X]\n self.gps = drone.getGPS(\"gps\")\n self.gps.enable(timestep)\n # Angles respect global coordinates [roll, pitch, yaw]\n self.imu = drone.getInertialUnit(\"inertial unit\")\n self.imu.enable(timestep)\n # Accelertion angles [roll, pitch, yaw]\n self.gyro = drone.getGyro(\"gyro\")\n self.gyro.enable(timestep)\n # Direction degree with north as reference\n self.compass = drone.getCompass(\"compass\")\n self.compass.enable(timestep)\n\n # Video acquisition\n fps = 25\n self.camera = drone.getCamera(\"camera\")\n self.camera_rate = 1000 // fps\n self.camera.enable(self.camera_rate)\n\n self.leds = [\n drone.getLED(\"front left led\"),\n drone.getLED(\"front right led\")\n ]\n # gimbal\n self.camera_roll = drone.getMotor(\"camera roll\")\n self.camera_pitch = drone.getMotor(\"camera pitch\")\n\n # Motors\n sides = [\n ['front', 'rear'],\n ['left', 'right']\n ]\n self.motors = [drone.getMotor(\"{} {} propeller\".format(part, side))\n for part in sides[0] for side in sides[1]]\n\n return True", "def __init__(self, app, hwevent):\n super().__init__(hwevent)\n self.wheeldelta = 0\n \"\"\"Integer representing up/down motion of the scroll wheel.\"\"\"\n if self.type == self.mousewheel:\n self.wheeldelta = hwevent.deltaY\n else:\n self.wheeldelta = 0\n rect = app.win.renderer.view.getBoundingClientRect()\n xscale = app.win.width / rect.width\n yscale = app.win.height / rect.height\n self.x = (hwevent.clientX - rect.left) * xscale\n \"\"\"The window x-coordinate of the mouse pointer when the event occurred.\"\"\"\n self.y = (hwevent.clientY - rect.top) * yscale\n \"\"\"The window y-coordinate of the mouse pointer when the event occurred.\"\"\"", "def initialize(self):\n super(QtBaseWidgetComponent, self).initialize()", "def axInit():\n ax.init()", "def set_joystick(self, bool):\n if self.table_ready:\n if bool:\n command = self.build_command(self.device, (\"set_joystick\", \"1\"))\n else:\n command = self.build_command(self.device, (\"set_joystick\", \"0\"))\n self.vcw.write(self.device, command)", "def init_devices(timeStep):\r\n\r\n robot = Robot()\r\n \r\n # Obtener dispositivos correspondientes a las ruedas.\r\n leftWheel = robot.getDevice('left wheel motor')\r\n rightWheel = robot.getDevice('right wheel motor')\r\n # Utilizamos velocidad, establecemos posición a infinito.\r\n leftWheel.setPosition(float('inf'))\r\n rightWheel.setPosition(float('inf')) \r\n leftWheel.setVelocity(0)\r\n rightWheel.setVelocity(0)\r\n\r\n # Obtener y activar el dispositivo de la cámara \r\n camera = robot.getDevice('camera')\r\n camera.enable(timeStep*10)\r\n\r\n\r\n #activar sensores ultrasónicos\r\n ultrasonic_sensors = []\r\n i = 0\r\n for sensor in ultrasonic_sensors_names: \r\n ultrasonic_sensors = ultrasonic_sensors + [robot.getDevice(sensor)]\r\n ultrasonic_sensors[i].enable(timeStep)\r\n i = i + 
1\r\n print(ultrasonic_sensors_names)\r\n\r\n #activar sensores infrarojos\r\n infrared_sensors = []\r\n i = 0\r\n for sensor in infrared_sensors_names: \r\n infrared_sensors = infrared_sensors + [robot.getDevice(sensor)]\r\n infrared_sensors[i].enable(timeStep)\r\n i = i + 1\r\n \r\n return robot, camera, leftWheel, rightWheel , infrared_sensors, ultrasonic_sensors", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()", "def _initComponent(self):\n\n self.optimizer = self._initOptimizer()\n self.scheduler = self._initScheduler()", "def initialize_robot():\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()\n\n proxy_autonomous_life = naoqi.ALProxy(\"ALAutonomousLife\", IP_ROBOT, PORT_ROBOT)\n proxy_autonomous_life.setState(\"disabled\")\n\n proxy_motion = naoqi.ALProxy(\"ALMotion\", IP_ROBOT, PORT_ROBOT)\n proxy_motion.wakeUp()", "def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass", "def setup(self):\n\n for row_pin in keypad_row_pins:\n #Set up row-pins\n self.gpio.setup(row_pin, self.gpio.OUT)\n\n for col_pin in keypad_col_pins:\n #Set up col-pins\n self.gpio.setup(col_pin, self.gpio.IN)", "def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()", "def __init__(self):\n # clears the console window\n if sys.platform in ('linux-i386','linux2'):\n os.system(\"clear\")\n elif sys.platform in ('win32','dos','ms-dos'):\n os.system(\"cls\")\n\n # print scripts info\n print self.WELCOME_MESSAGE\n\n # initialize all instance variables\n self.guiElements = {} # dictionary of gui elements (buttons, strings, sliders, ...)\n self.gui_events = [] # list of events\n self.gui_event_ids = {} # dictionary of event ids\n self.config = {} # configuration dictionary\n self.target = None # import or export\n self.callback = None # function to call when config gui is done\n self.texpathIndex = 0\n self.texpathCurrent = ''\n\n # reset GUI coordinates\n self.xPos = self.XORIGIN\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]\n\n # load configuration\n self.load()", "def controls_setup(self):\n pass", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def __init__(self):\n\n ShowBase.__init__(self)\n controls.setup_mouse()\n self.tpp_camera = TPPCamera()\n\n try:\n self.world = World()\n except OSError:\n raise\n\n self.physics = Physics(self.world.player)\n base.taskMgr.add(self.__main_loop, \"__main_loop\")", "def initialize(self):\n 
self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def initialize(self):\n super(CircTab,self).initialize()\n self.radius = 2\n # set x and y scales for the circle size and use checkXPos and\n # checkYPos instead of updateSize\n self.initialXScale.config(from_=-self.radius, to=self.radius,\n command=self.checkXPos,resolution=0.01)\n self.initialYScale.config(from_=-self.radius, to=self.radius,\n command=self.checkYPos,resolution=0.01)", "def joy_callback(self, msg:Joy):\n\n teleop_btn_pressed = msg.buttons[0] == 1\n assited_driving_pressed = msg.buttons[3] == 1\n\n if teleop_btn_pressed and not self.enable_teleop_pressed:\n self.teleop_enabled = not self.teleop_enabled\n self.enable_teleop_pressed = True\n rospy.loginfo(\"[XBOX CONTROLLER] Teleop enabled: {}\".format(self.teleop_enabled))\n if self.teleop_enabled:\n self.assisted_driving_enabled = False\n self.rumble(1)\n else:\n self.rumble(2)\n\n if assited_driving_pressed and not self.enable_assisted_driving_pressed:\n self.assisted_driving_enabled = not self.assisted_driving_enabled\n self.enable_assisted_driving_pressed = True\n rospy.loginfo(\"[XBOX CONTROLLER] Assisted driving enabled: {}\".format(self.assisted_driving_enabled))\n if self.assisted_driving_enabled:\n self.teleop_enabled = False\n self.rumble(1)\n else:\n self.rumble(2)\n\n if not teleop_btn_pressed:\n self.enable_teleop_pressed = False\n\n if not assited_driving_pressed:\n self.enable_assisted_driving_pressed = False\n \n joy_buttons_msg = JoyButtons()\n joy_buttons_msg.Header.stamp = rospy.Time.now()\n joy_buttons_msg.Header.frame_id = \"xbox_controller\"\n joy_buttons_msg.left_x = msg.axes[0]\n joy_buttons_msg.left_y = msg.axes[1]\n joy_buttons_msg.right_x = msg.axes[3]\n joy_buttons_msg.right_y = msg.axes[4]\n\n joy_buttons_msg.teleop_enable = msg.buttons[0] == 1\n joy_buttons_msg.assited_driving = msg.buttons[3] == 1\n\n joy_buttons_msg.d_down = msg.axes[7] == -1\n joy_buttons_msg.d_up = msg.axes[7] == 1\n joy_buttons_msg.d_left = msg.axes[6] == 1\n joy_buttons_msg.d_right = msg.axes[6] == -1\n\n joy_buttons_msg.shoulder_l1 = msg.buttons[4] == 1\n joy_buttons_msg.shoulder_l2 = msg.axes[2]\n joy_buttons_msg.shoulder_r1 = msg.buttons[5] == 1\n joy_buttons_msg.shoulder_r2 = msg.axes[5]\n\n self.joy_btn_pub.publish(joy_buttons_msg)", "def init_player():\n global active_track_idx\n global track_last_slided_pos\n global track_last_paused_pos\n global track_total_play_time \n\n # INITIALIZE Player\n active_track_idx = -1\n cancel_update_play_time_loop()\n cancel_track_end_event_loop()\n track_status.set(\"---\")\n track_title.set(\"--- : \")\n play_pause_btn.configure(image=play_img)\n track_last_slided_pos = 0\n track_last_paused_pos = 0\n track_total_play_time = 0\n track_pos_label.configure(text=\"00:00\")\n track_length_label.configure(text=\"00:00\")\n track_pos_slider.configure(state=\"disabled\")\n track_pos.set(0)", "def __defaults__(self): \n self.tag = 'Component'\n self.origin = [[0.0,0.0,0.0]]", "def ev_joyaxismotion(self, event: tcod.event.JoystickAxis) -> T | None:", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n 
##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "def __init__(self) -> None:\n self.sensor = serial.Serial(config.DEVICE)\n super().__init__()", "def RobotInit():\n names = [\"Body\"]\n angles = [-0.038392066955566406, 0.1349501609802246, 1.1964781284332275, 0.07512402534484863, -1.4926238059997559, -1.3391400575637817, 0.11500811576843262, 0.029999971389770508, -0.25766992568969727, -0.09506607055664062, -0.9694461822509766, 2.086198091506958, -1.168950080871582, 0.07367396354675293, -0.25766992568969727, 0.10128593444824219, -0.9342479705810547, 2.0663399696350098, -1.186300277709961, -0.07205605506896973, -0.309826135635376, 0.24233007431030273, 0.06131792068481445, 0.8544800281524658, 1.5983860492706299, 0.17799997329711914]\n fractionMaxSpeed = 0.1\n time.sleep(1)\n motion.setAngles(names, angles, fractionMaxSpeed)", "def __init__(self,name,speed,depth_of_view,view_angle,x_coor = \"\",y_coor = \"\"):\n self.name = name\n self.speed = speed # That will the instantenous speed of the robot\n self.depth_of_view = depth_of_view # That will the instantenous depth of view of the robot\n self.view_angle = view_angle # That will the instantenous view angle of the robot\n self.type = \"Robot\" #Specift the object type\n self.x = x_coor # store the position of the robot\n self.y = y_coor # store the position of the robot\n self.kind = name #Store its kind to give the GUI", "def init(self):\n self.focus_modes = []\n for focus_mode in self['focusModes']:\n self.focus_modes.append(\\\n {'modeName': focus_mode.modeName,\n 'lensCombination': eval(focus_mode.lensCombination),\n 'lensModes': eval(focus_mode.lensModes),\n 'size': eval(focus_mode.size),\n 'message': eval(focus_mode.message),\n 'diverg': eval(focus_mode.divergence)})\n self.focus_motors_dict = {}\n\n focus_motors = []\n focus_motors = eval(self.getProperty('focusMotors'))\n\n for focus_motor in focus_motors:\n self.focus_motors_dict[focus_motor] = []\n\n #TODO\n self.motors_groups = [self.getObjectByRole(\"P14ExpTbl\"),\n self.getObjectByRole(\"P14KB\"),\n self.getObjectByRole(\"P14DetTrans\"),\n self.getObjectByRole(\"P14BCU\"),\n self.getObjectByRole(\"slitsMotors\")]\n \n\n if len(self.motors_groups) > 0:\n for motors_group in self.motors_groups:\n self.connect(motors_group,\n 'mGroupFocModeChanged',\n self.motor_group_focus_mode_changed)\n else:\n logging.getLogger(\"HWR\").debug('BeamFocusing: No motors defined')\n self.active_focus_mode = self.focus_modes[0]['modeName']\n self.size = self.focus_modes[0]['size']\n self.update_values()\n\n self.cmd_set_calibration_name = self.getCommandObject(\\\n 'cmdSetCallibrationName')\n try:\n self.cmd_set_phase = eval(self.getProperty('setPhaseCmd'))\n except:\n pass", "def initialize_electronics(self):\n\n 
self.electronics = ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()", "def robotInit(self):\n # Update constants from json file on robot\n Constants.updateConstants()\n # Robot odemetry command\n self.updateodemetry = updateodemetry.UpdateOdemetry()\n # Set command group member variables\n self.autonomous = autogroup.AutonomousCommandGroup()\n self.disabled = disabledgroup.DisabledCommandGroup()\n self.teleop = teleopgroup.TeleopCommandGroup()\n self.test = testgroup.TestCommandGroup()", "def setup_sensors(self, configs):\n self.__sensors = self.setup_components(configs, 'scale_client.sensors')", "def initialize(self):\n super(QtWidgetComponent, self).initialize()\n shell = self.shell_obj\n self.set_enabled(shell.enabled)\n if shell.bgcolor:\n self.set_bgcolor(shell.bgcolor)\n if shell.fgcolor:\n self.set_fgcolor(shell.fgcolor)\n if shell.font:\n self.set_font(shell.font)", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.screen = None\n self.engine = None\n self.engines = []\n self.i_e = InputEngine()\n self.e_e = EventEngine(self.i_e)", "def ev_joybuttonup(self, event: tcod.event.JoystickButton) -> T | None:", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.patrol_mode = False\n self.enemy_not_detected = True\n print(\"+++++ self.patrol_mode = {} y self.enemy_not_detected = {}\".format(self.patrol_mode, self.enemy_not_detected))\n self.positionX = 0\n self.positionY = 0\n self.direction = ['forward', 'right', 'backward', 'left']\n self.offset = [0, 1, 0, -1]\n self.index = 0\n self.pointing = self.direction[self.index]\n\n\n # Connect two large motors on output ports B and C\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n self.weapon = MediumMotor(OUTPUT_A)\n self.sound = Sound()\n self.leds = Leds()\n self.ir = InfraredSensor()\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()\n threading.Thread(target=self._proximity_thread, daemon=True).start()", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def __init__(self, touch_sensor, port=\"A\"):\r\n # ---------------------------------------------------------------------\r\n # Done: 4. Read the following, ASKING QUESTIONS AS NEEDED.\r\n # Once you understand the code, change this _TODO_ to DONE.\r\n # ---------------------------------------------------------------------\r\n\r\n # The arm motor should always use a 100% duty cycle for its speed.\r\n self.speed = 100\r\n\r\n self.arm_touch_sensor = touch_sensor\r\n self.arm_motor = rose_ev3.Motor(port, motor_type=\"medium\")\r\n self.is_calibrated = False", "def _init_system(*args):\n __set_time_elements(args[0], args[1])\n __set_control_elements(args[0], args[2], args[3])\n __set_sensor_elements(args[0], args[4], args[5], args[6], args[7])", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. 
This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def initAudio(self):\n\t\t# Initialize pitch detection\n\t\tself.listener = PitchDetect(channels=1)\n\t\tself.listener.listen()\n\t\tself.recording = False\n\t\tself.paused = False", "def __init__(self):\n game_engine = get_gameengine()\n if game_engine is not None:\n self = game_engine\n else:\n ## The targeted frames per second\n self.target_fps = 200\n\n ## The start time\n self.time = time.time()\n\n ## A list of all registered game objects\n self.game_objects = list()\n\n ## A list of colliders\n self.colliders = list()\n\n ## Manage the user inputs\n self.input_manager = InputManager(self)\n\n ## Determines the state of the Game Engine\n self.running = False\n\n ## Variable to pause the Game Engine\n self.paused = False\n\n self.time_node = pm.PyNode('time1')\n # end if", "def __init__(self):\n\n # Screen's settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Bluebee Settings\n self.bb_speed = 1.0\n\n # Moving test.\n self.counter = 0\n self.max_left = 400\n self.max_up = 300\n self.max_right = 400\n self.max_down = 300", "def __init__(self, width, height, title):\r\n super().__init__(width, height, title)\r\n\r\n # door list\r\n self.door_list = None\r\n\r\n # wall list\r\n self.wall_list = None\r\n\r\n # back ground list\r\n self.background_list = None\r\n\r\n # Sprite lists\r\n self.player_list = None\r\n\r\n # Set up the player\r\n self.player = None\r\n\r\n # physics engine\r\n self.physics_engine = None\r\n\r\n # map change\r\n self.map_change = 1\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0" ]
[ "0.77946836", "0.7709619", "0.7708428", "0.7690953", "0.76627296", "0.7585769", "0.7510696", "0.7043306", "0.69623667", "0.68985695", "0.6756267", "0.67009485", "0.65528977", "0.6516639", "0.64917547", "0.63566685", "0.63212687", "0.6281358", "0.61667794", "0.612322", "0.61198604", "0.61082023", "0.6049388", "0.60411876", "0.59494656", "0.5938758", "0.59214735", "0.5913836", "0.59133005", "0.59058243", "0.5850376", "0.5847713", "0.584524", "0.58427054", "0.5823927", "0.5820689", "0.58192", "0.57918286", "0.5787314", "0.5778682", "0.57422614", "0.574083", "0.57338876", "0.5727815", "0.56916577", "0.5682204", "0.568133", "0.56603837", "0.56518376", "0.56513125", "0.56509274", "0.5642768", "0.5636053", "0.5593743", "0.5592437", "0.55887467", "0.558644", "0.5571262", "0.55671746", "0.55633885", "0.5560899", "0.5539105", "0.55385643", "0.55371326", "0.5532361", "0.5527472", "0.5521483", "0.5517971", "0.55107373", "0.5509", "0.5503082", "0.54951704", "0.54945403", "0.5492864", "0.54919", "0.5489581", "0.5488261", "0.5487227", "0.54807055", "0.5466891", "0.54606164", "0.5459823", "0.5439054", "0.54384893", "0.5433336", "0.543189", "0.5422447", "0.54196763", "0.54188967", "0.54130405", "0.5400951", "0.5396079", "0.5389247", "0.5387858", "0.53869826", "0.53843915", "0.5379572", "0.53774655", "0.5376434", "0.5367961" ]
0.82271236
0
Listen for events to happen
def listen(self): if not self.axis_data: self.axis_data = {} if not self.button_data: self.button_data = {} for i in range(self.controller.get_numbuttons()): self.button_data[i] = False if not self.hat_data: self.hat_data = {} for i in range(self.controller.get_numhats()): self.hat_data[i] = (0, 0) for event in pygame.event.get(): if event.type == pygame.JOYAXISMOTION: self.axis_data[event.axis] = round(event.value,2) elif event.type == pygame.JOYBUTTONDOWN: self.button_data[event.button] = True elif event.type == pygame.JOYBUTTONUP: self.button_data[event.button] = False elif event.type == pygame.JOYHATMOTION: self.hat_data[event.hat] = event.value axis=self.axis_data if 0 in axis: self.x=axis[0] self.y=-axis[1] # Turbo if self.button_data[7]: self.x*=2 self.y*=2 # Start Camera if self.button_data[3]: subprocess.Popen(["firefox",otraip+"/html"], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) return "camera" # Measure if self.button_data[1]: return "measure" # Exit if self.button_data[2]: return "exit" return "move "+str(self.x)+" "+str(self.y)+"\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def events(self):", "def handleEvents(self, events):\n pass", "def on_event(self, event):\n pass", "def on_event(self, event):\r\n pass", "def on_event(self, event):", "def listen(self):\n pass", "def event_receive(self,event):\n\n pass", "def handle_event(self, event):", "def listen(self):\n while self.active:\n self.handle_input()", "def listener(self, event):\n print \"TB:@%s arrived event %s\" % (event.time, event) \n informFunction = self._informFunc\n informFunction((event.time, event.state))\n return []", "def event_in_cb(self, msg):\n self.event = msg.data", "def handleEvent(self, event):\n pass", "def handle_event(self, event):\n pass", "def __handle_events(self):\r\n for event in pygame.event.get():\r\n self.controller.handle_event(event)", "def on_event(self, events):\n raise NotImplemented(\"on_event method should be implemented.\")", "def listen_clicks(self):\n self.time_correction = self.get_time_corr()\n self.listen_start = self.master_clock()\n self._clear_events()", "def listen(self, debug=True):\n if len(self._handlers) == 0:\n warnings.warn('No event handler has been added.')\n\n events = self._events_fetcher()\n if debug and len(events) > 0:\n print events\n\n for e in events:\n if self._handlers.has_key(e):\n self._handlers[e]()", "def doEvent(self, source):\n pass", "def visit_event(self, event):", "def react_to_event(self):\n raise NotImplementedError()", "def subscribe(self, event_handler):\n pass # pragma: no cover", "def process_event(self, event):\r\n pass", "def listen(self):\n raise NotImplementedError()", "def hook_events(self):\n wxMediator.hook_events(self)\n EVT_MINE(self, wxEVT_SOCKET_DATA, self.on_data)\n EVT_MINE(self, wxEVT_NEW_LISTEN_CONN, self.new_listen_conn)\n EVT_MINE(self, wxEVT_NEW_TALK_CONN, self.new_talk_conn)", "def on_event(self, event):\r\n\r\n print(\"on event called, event:\", event)\r\n\r\n self.state = self.state.on_event(event)\r\n publish_state_msg(state_msg, odrive_bridge.get_state())", "def event_loop(self):\n for event in pygame.event.get():\n self.scene.get_event(event)", "def run(self, event):\n pass", "def __listener__(self):\n frame_interval = 0.1\n str_list = []\n c = ''\n while True:\n with Timeout(frame_interval, False):\n while True:\n try:\n c = self.ser.read()\n except:\n self.ser.close()\n self.make_connection.go()\n self.connection_made.wait()\n str_list.append(c)\n if c == \"\\n\" or c == '':\n break\n received = ''.join(str_list)\n str_list = []\n if received:\n for i in self.read_handlers:\n gevent.spawn(i, received)\n sleep(0.001)", "def listen(self):\n while self.active:\n Quartz.CFRunLoopRunInMode(\n Quartz.kCFRunLoopDefaultMode, 5, False)", "def process(self, event):\n pass", "def listen(self, debug=False):\n if len(self._handlers) == 0:\n warnings.warn('No event handler has been added.')\n\n events = self._events_fetcher()\n events = self._process_events(events)\n if debug and len(events) > 0:\n print events\n\n for e in events:\n if self._handlers.has_key(e):\n handler, delay = self._handlers[e]\n t = time.time()\n if t >= self._records[e] + delay:\n self._records[e] = t\n handler()", "def listen(self) -> None:\n raise NotImplementedError", "def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert 
revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name", "def __init__(self):\n _subscribe_marked_events(self)", "def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()", "def listen(self):\n if self.listening:\n return\n\n if self.mode == gpio.IN:\n # Remove any existing detection\n gpio.remove_event_detect(self.bcm_id)\n\n # Use separate callbacks for rising and falling edges\n gpio.add_event_detect(self.bcm_id, gpio.BOTH,\n callback=self._edge)\n\n self.listening = True", "def do_input_events(self):\r\n for event in EventStream.allNext(self.streams):\r\n if self.handler.event(event) and self.unhandledHandler:\r\n self.unhandledHandler(event)", "def events(self) -> None:\n\n for event in pg.event.get():\n if event.type == TIMER:\n if self.game_state == InGameState.RUNNING:\n self.timer -= 1\n else:\n self.display_timer -= 1\n\n if event.type == pg.QUIT:\n self.quit()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n self.quit()", "async def events(self) -> Iterable[Event]:", "def on_event_finished(self, event):", "def listen_presses(self):\n self.time_correction = self.get_time_corr()\n self.listen_start = self.master_clock()\n self._clear_events()", "def collect_events(self, inputs, ew):\n input_module.collect_events(self, inputs, ew)", "def event_loop(self):\n for event in pg.event.get():\n self.keys = pg.key.get_pressed()\n if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:\n self.done = True\n self.cannon.get_event(event, self.objects)", "def events_and_callbacks_qi_framework():\n\n # ALMemory acts as the hub for the distribution of event notifications.\n # Source: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/naoqi-apis/naoqi-core/almemory\n # Example: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/other-tutorials/python-sdk-tutorials/python-sdk-examples/vision/face\n\n # Create a broker\n # TODO(TK): why?\n naoqi.ALBroker(\"pythonBroker\", IP_ME, PORT_ME, IP_ROBOT, PORT_ROBOT)\n\n proxy_memory = naoqi.ALProxy(\"ALMemory\", IP_ROBOT, PORT_ROBOT)\n\n # Register callback:\n def mycallback(key, value):\n print(\"qi callback: key={}, value={}\".format(key, value))\n sess = proxy_memory.session()\n mem = sess.service(\"ALMemory\")\n sub = mem.subscriber(\"FaceDetected\")\n sub.signal.connect(functools.partial(mycallback, \"FaceDetected\"))\n\n # Raise an event:\n proxy_memory.raiseEvent(\"FaceDetected\", str(datetime.datetime.now()))\n proxy_memory.raiseEvent(\"AnotherEvent\", str(datetime.datetime.now()))\n time.sleep(0.1) # give it some time to process", "def start(self, event):\n return", "def ProcessEvents(self):\n self.work_queue.put(self.__ProcessEventsAsync)", "def onNewEvent(self, event):\n self._logger.debug('Received event: %s' % event)", "def events(self, events):\n\n self._events = events", "def handle_event(self, event, window):\n pass", "def on(self): # pylint: disable=invalid-name\n self._make_event(1)", "def handle_event(self, event):\n self.give_sub_event.handle_event(event)", "def msg_event(self, event):\r\n pass", "def runEventCallbacks(self, event, *args):\n\n if not event in self.EVENT_TYPES:\n raise Exception(\"XnatIo (onEvent): invalid event type '%s'\"%(\\\n event))\n 
if not hasattr(self, 'eventCallbacks__'):\n print('self has no attribute eventCallbacks__')\n return\n\n for callback in self.eventCallbacks__[event]:\n #print(f\"EVENT CALLBACK {event}\")\n callback(*args)", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def eventInCallback(self, msg):\n rospy.loginfo(\"event_in msg received\")\n self.event_in = msg.data", "def _on_event(self, event) -> None:\n self.signal.emit(event)", "def listen(self):\n\n # It's ideal to start listening before the game starts, but the\n # down-side\n # is that object construction may not be done yet. Here we pause\n # shortly\n # to let initialization finish, so all functionality (e.g. self.log)\n # is\n # available.\n time.sleep(0.1)\n\n for st in self.sentences():\n if st:\n self.onMessage(source=None, message=st)", "def eventReceived(self, event):\n print repr(event)", "def handle_events(self):\n self._busy_mutext.acquire()\n try:\n event = self.EventsFactory.pull_event()\n while event:\n self.logger.debug('Handling new event: {}'.format(event.id))\n event_endpoint_scope_classes = event.EndpointScope.get_static_hierarchy()\n stat_collection = []\n for statistics_cls in self._statistics:\n if statistics_cls.EndpointScope in event_endpoint_scope_classes:\n statistics = statistics_cls.init_by_event(event)\n self.logger.debug(f'Collecting statistics: {statistics}')\n stat_collection.append(statistics)\n statistics.collect()\n self.logger.debug('Checking for tasks to run')\n for task_cls in self.get_conditional_tasks():\n if task_cls.EndpointScope in event_endpoint_scope_classes:\n task_endpoint_scope_classes = task_cls.EndpointScope.get_static_hierarchy()\n statistics = []\n for stats in stat_collection:\n if stats.Endpoint == task_cls.Endpoint and stats.EndpointScope in task_endpoint_scope_classes:\n statistics.append(stats)\n task = task_cls(event.EndpointScope.init_by_event(event), statistics, event)\n task.handle()\n event = self.EventsFactory.pull_event()\n finally:\n self._busy_mutext.release()", "def on(self) -> None:\n ...", "def handle_event(self, event):\n raise NotImplementedError(\n \"handle_event() is not implemented for base class.\")", "def event_handler(self, response):\n pass", "def init_events_transmitter():\n class StatusListener(SubscribeCallback):\n def status(self, pubnub, status):\n event = \"unknown\"\n\n if status.operation == PNOperationType.PNSubscribeOperation \\\n and status.category == PNStatusCategory.PNConnectedCategory:\n event = \"Connect\"\n elif status.operation == PNOperationType.PNUnsubscribeOperation \\\n and status.category == PNStatusCategory.PNAcknowledgmentCategory:\n event = \"Unsubscribe\"\n\n asyncio.ensure_future(pubnub.publish().channel('status-' + APP_KEY).message({\n \"event\": event\n }).future(), loop=loop)\n\n def presence(self, pubnub, presence):\n pass\n\n def message(self, pubnub, message):\n pass\n\n listener = StatusListener()\n pubnub.add_listener(listener)", "def _did_receive_event(self, connection):\n\n if not self._is_running:\n return\n\n if connection.has_timeouted:\n return\n\n response = connection.response\n data = None\n\n if response.status_code != 200:\n pushcenter_logger.error(\"[NURESTPushCenter]: Connection failure [%s] %s\" % (response.status_code, response.errors))\n\n else:\n data = response.data\n\n if len(self._delegate_methods) > 0:\n for m in self._delegate_methods:\n try:\n m(data)\n except Exception as exc:\n 
pushcenter_logger.error(\"[NURESTPushCenter] Delegate method %s failed:\\n%s\" % (m, exc))\n elif data:\n events = data['events']\n self.nb_events_received += len(events)\n self.nb_push_received += 1\n\n pushcenter_logger.info(\"[NURESTPushCenter] Received Push #%s (total=%s, latest=%s)\\n%s\" % (self.nb_push_received, self.nb_events_received, len(events), json.dumps(events, indent=4)))\n self._last_events.extend(events)\n\n if self._is_running:\n uuid = None\n if data and 'uuid' in data:\n uuid = data['uuid']\n\n self._listen(uuid)", "def rfactor_event_loop():\n if RfactorLiveEvent.event.is_set():\n is_live = RfactorLiveEvent.get_nowait()\n # -- Update rFactor live state to front end\n if is_live is not None:\n eel.rfactor_live(is_live)\n\n if RfactorStatusEvent.event.is_set():\n status = RfactorStatusEvent.get_nowait()\n # -- Update rFactor status message in front end\n if status is not None:\n logging.debug('Updating rf2 status message: %s', status)\n eel.rfactor_status(status)\n\n RfactorStatusEvent.reset()", "def processEvents(self):\n if not self.getIsConnected():\n return\n\n # Loop until there is no more data in the receive buffer.\n while True:\n if not self._socketPoller.isReady():\n # There is no data waiting.\n return\n\n nBytesRead, _ = self._socket.recvfrom_into(self._buffer)\n if nBytesRead <= 0:\n # Since we checked for data ready, we don't expect this.\n return\n\n # _bufferView is a memoryview, so we can slice efficienty.\n self._elementReader.onReceivedData(self._bufferView[0:nBytesRead])", "def _setup_events(self):\n # Bind all events from our buttons (including 'exit')\n self.Bind(wx.EVT_BUTTON, self._process_event)", "def enable_callbacks(self):\n\n onObjectUpdate_received = self.message_handler.register('ObjectUpdate')\n onObjectUpdate_received.subscribe(self.onObjectUpdate)\n\n onObjectUpdateCached_received = self.message_handler.register('ObjectUpdateCached')\n onObjectUpdateCached_received.subscribe(self.onObjectUpdateCached)\n\n onObjectUpdateCompressed_received = self.message_handler.register('ObjectUpdateCompressed')\n onObjectUpdateCompressed_received.subscribe(self.onObjectUpdateCompressed)\n\n onImprovedTerseObjectUpdate_received = self.message_handler.register('ImprovedTerseObjectUpdate')\n onImprovedTerseObjectUpdate_received.subscribe(self.onImprovedTerseObjectUpdate)\n \n onObjectProperties_received = self.message_handler.register('ObjectProperties')\n onObjectProperties_received.subscribe(self.onObjectProperties)\n\n onKillObject_received = self.message_handler.register('KillObject')\n onKillObject_received.subscribe(self.onKillObject)\n\n # uncomment these to view packets sent back to simulator\n # onObjectName_sent = self.message_handler.register('ObjectName')\n # onObjectName_sent.subscribe(self.helpers.log_packet, self)\n\n # onDeRezObject_sent = self.message_handler.register('DeRezObject')\n # onDeRezObject_sent.subscribe(self.helpers.log_packet, self)", "def handle_events(self, events):\n for event in events:\n event_type = event['type']\n if event_type == types.SO_CHANGE:\n for key in event['data']:\n self.data[key] = event['data'][key]\n self.on_change(key)\n\n elif event_type == types.SO_REMOVE:\n key = event['data']\n assert key in self.data, (key, self.data.keys())\n del self.data[key]\n self.on_delete(key)\n\n elif event_type == types.SO_SEND_MESSAGE:\n self.on_message(event['data'])\n else:\n assert False, event", "def _listen(self):\n while not self._stop_signal: # run through all signals\n logger.warning('checking signal')\n 
step_time_start = datetime.datetime.utcnow()\n next_signal = self._get_next_signal(step_time_start)\n if not next_signal:\n # all steps are finished\n self._success_finish()\n break\n self.current_signal = next_signal\n try:\n self._run_signal(next_signal, step_time_start)\n except FlowError as ex:\n self._signal_failed(next_signal, datetime.datetime.utcnow(), ex)\n break\n self.finish_date = datetime.datetime.utcnow()\n self._finish()", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def accept(self, event):\n raise NotImplementedError()", "def __init__(self):\n self._listeners = []", "def __call__(self, event):\n if not self.events or event in self.events:\n super(EventHandler, self).__call__(event)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def _add_listeners(vehicle):\n @vehicle.on_attribute('mode')\n def mode_listener(self,name, msg):\n util.log_info(\"Mode switched to %s\" % msg.name)\n \n if msg.name != shared.status['manual_mode']: # manual override\n if msg.name == 'RTL' or msg.name == 'LAND':\n util.log_warning(\"External %s detected. Abort.\" % msg.name)\n shared.status['abort'] = True\n \n @vehicle.on_attribute('gps_0')\n def gps_listener(self,name, msg): # monitor satellites\n if not shared.status['thread_flag'] & shared.NSATS_TOO_LOW:\n if msg.satellites_visible < 6:\n util.log_warning(\"Satellites dropped below 5!\")\n shared.status['thread_flag'] |= shared.NSATS_TOO_LOW\n \n elif msg.satellites_visible >= 10:\n util.log_info(\"Satellites recovered to %d.\" % msg.satellites_visible)\n shared.status['thread_flag'] &= ~shared.NSATS_TOO_LOW\n \n @vehicle.on_message('SYSTEM_TIME')\n def time_listener(self,name, msg): # log timestamp\n format = '%Y-%m-%d %H:%M:%S'\n val = time.localtime(msg.time_unix_usec/1000000)\n shared.timestamp = time.strftime(format, val)", "def _check_events(self):\n for event in pygame.event.get():\n # quit stuff\n if event.type == pygame.QUIT:\n sys.exit()\n # mouse click for 'PLAY' button\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n # checks for key down/up events and sends it to appropriate method\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def InitOtherEvents(self):\n\n pass", "def start_listener():\n listener = keyboard.Listener(\n on_press=on_press\n )\n listener.start()", "def trigger(self, type, event):", "def on_open(self):\n def event_loop():\n logger.debug(pformat(self.query.request))\n self.send(json.dumps(self.query.request))\n while not self.event.is_set():\n #print('Waiting around on the socket: %s' % self.gettimeout())\n self.event.wait(self.gettimeout())\n \n logger.debug('Event loop terminating.')\n \n self.thread = threading.Thread(\n target=event_loop)\n self.thread.setDaemon(True)\n self.thread.start()", "def _start_listeners(self):\n if self.listeners:\n self.state = \"listening\"\n for event_listener in self.listeners:\n event_listener.start()\n\n 
for listener in self.listeners:\n listener.join()", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def listen():\n msg = MSG()\n ctypes.windll.user32.GetMessageA(ctypes.byref(msg), 0, 0, 0)", "def check_events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.ai_game.quit()\r\n elif event.type == pg.KEYDOWN:\r\n self._check_keydown_events(event)\r\n elif event.type == pg.KEYUP:\r\n self._check_keyup_events(event)\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = pg.mouse.get_pos()\r\n self._check_button(mouse_pos)", "def handle_events(self):\n for event in pymlgame.get_events():\n if event.type == pymlgame.E_NEWCTLR:\n print('new ctlr with uid:', event.uid)\n elif event.type == pymlgame.E_KEYDOWN:\n if event.button == pymlgame.CTLR_UP:\n if self.snake.direction != DOWN:\n self.snake.direction = UP\n elif event.button == pymlgame.CTLR_DOWN:\n if self.snake.direction != UP:\n self.snake.direction = DOWN\n elif event.button == pymlgame.CTLR_LEFT:\n if self.snake.direction != RIGHT:\n self.snake.direction = LEFT\n elif event.button == pymlgame.CTLR_RIGHT:\n if self.snake.direction != LEFT:\n self.snake.direction = RIGHT\n elif event.type == pymlgame.E_PING:\n print('ping from', event.uid)", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def events(self, events: object):\n\n self._events = events", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # if the exit button on screen is clicked close the program\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def user_input_listener(state: SharedState):" ]
[ "0.76001936", "0.7514999", "0.736961", "0.73453784", "0.7160621", "0.70146126", "0.6963289", "0.6895858", "0.6895355", "0.6894317", "0.6875384", "0.6862019", "0.68443125", "0.6844133", "0.6816759", "0.669522", "0.6687219", "0.66471237", "0.65838945", "0.6572624", "0.65486026", "0.65244305", "0.650016", "0.6493089", "0.64855194", "0.64776796", "0.6469066", "0.6424169", "0.6369017", "0.63621837", "0.63418996", "0.6339079", "0.6328833", "0.63265634", "0.6311399", "0.63006157", "0.62937975", "0.62931955", "0.62831235", "0.6277216", "0.6241927", "0.62376857", "0.6200728", "0.6199271", "0.6196973", "0.61952424", "0.61722404", "0.6164794", "0.6143602", "0.61303747", "0.6094026", "0.60839736", "0.60617715", "0.60540575", "0.6041843", "0.60389555", "0.60380435", "0.6035985", "0.60324836", "0.60311246", "0.6025673", "0.6023607", "0.601563", "0.6010736", "0.6009867", "0.6005563", "0.6004079", "0.59974277", "0.59946007", "0.5992088", "0.5982982", "0.5982982", "0.5980777", "0.5980721", "0.5978772", "0.596586", "0.596586", "0.5964988", "0.59617424", "0.59591657", "0.59579134", "0.594839", "0.5946978", "0.59416324", "0.5941283", "0.5941283", "0.59390724", "0.592639", "0.59259564", "0.59259564", "0.59259564", "0.59259564", "0.59259564", "0.59259564", "0.59259564", "0.59259564", "0.5925499", "0.5925079", "0.5917832", "0.59124845" ]
0.63651896
29
Shift the colormap by dragging the cursor left or right. Stretch the colormap by dragging the cursor up or down.
def ms_contrast(self, viewer, event, data_x, data_y, msg=True): if not self.cancmap: return False event.accept() msg = self.settings.get('msg_contrast', msg) x, y = self.get_win_xy(viewer) if event.state == 'move': self._tweak_colormap(viewer, x, y, 'preview') elif event.state == 'down': self._start_x, self._start_y = x, y if msg: self.onscreen_message( "Shift and stretch colormap (drag mouse)", delay=1.0) else: self.onscreen_message(None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_cmap(cmap, start=0., locpoint=0.5, stop=1.0, name='centered'):\r\n\r\n # declare a colour + transparency dictionary\r\n cdict={'red':[], 'green':[], 'blue':[], 'alpha':[]}\r\n\r\n # regular index to compute the colors\r\n RegInd = np.linspace(start, stop, cmap.N)\r\n\r\n # shifted index to match what the data should be centered on\r\n ShiftInd = np.hstack([np.linspace(0., locpoint, int(cmap.N / 2),\r\n endpoint=False),\r\n np.linspace(locpoint, 1., int(cmap.N / 2))])\r\n\r\n # associate the regular cmap's colours with the newly shifted cmap colour\r\n for RI, SI in zip(RegInd, ShiftInd):\r\n\r\n # get standard indexation of red, green, blue, alpha\r\n r, g, b, a = cmap(RI)\r\n\r\n cdict['red'].append((SI, r, r))\r\n cdict['green'].append((SI, g, g))\r\n cdict['blue'].append((SI, b, b))\r\n cdict['alpha'].append((SI, a, a))\r\n\r\n return LinearSegmentedColormap(name, cdict)", "def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )", "def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name=\"shiftedcmap\"):\n cdict = {\"red\": [], \"green\": [], \"blue\": [], \"alpha\": []}\n\n # regular index to compute the colors\n reg_index = np.linspace(start, stop, 257)\n\n # shifted index to match the data\n shift_index = np.hstack(\n [\n np.linspace(0.0, midpoint, 128, endpoint=False),\n np.linspace(midpoint, 1.0, 129, endpoint=True),\n ]\n )\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n\n cdict[\"red\"].append((si, r, r))\n cdict[\"green\"].append((si, g, g))\n cdict[\"blue\"].append((si, b, b))\n cdict[\"alpha\"].append((si, a, a))\n\n newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)\n\n return newcmap", "def _move_cursors_to_pos(self):\n for axis in range(3):\n x, y = self._vox[list(self._xy_idx[axis])]\n self._images['cursor_v'][axis].set_xdata([x, x])\n self._images['cursor_h'][axis].set_ydata([y, y])\n self._zoom(0) # doesn't actually zoom just resets view to center\n self._update_images(draw=True)\n self._update_moved()", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def changeColor(self):\n self.layer.new_colormap()", "def shifted_color_map(cmap, start=0, midpoint=0.5, stop=1.0,\n name='shiftedcmap', data=None):\n if data is not None:\n midpoint = midpoint_to_shift_color_to_zero(data)\n\n cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}\n\n # regular index to compute the colors\n reg_index = np.linspace(start, stop, 257)\n\n # shifted index to match the data\n shift_index = np.hstack([\n np.linspace(0.0, midpoint, 128, endpoint=False),\n np.linspace(midpoint, 1.0, 129, endpoint=True)])\n\n for ri, si in zip(reg_index, shift_index):\n r, g, b, a = cmap(ri)\n cdict['red'].append((si, r, r))\n cdict['green'].append((si, g, g))\n cdict['blue'].append((si, b, b))\n cdict['alpha'].append((si, a, a))\n\n newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)\n plt.register_cmap(cmap=newcmap)\n\n return newcmap", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def 
set_zooming_mouse(self):\n # Zooming: right button mouse\n self.set('RightClickMove', 'Zoom',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0]*2.5,\n p[\"mouse_press_position\"][0],\n p[\"mouse_position_diff\"][1]*2.5,\n p[\"mouse_press_position\"][1]))", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")", "def mouseReleaseEvent(self, event):\n width = self.frameGeometry().width()\n height = self.frameGeometry().height()\n cursor = QtGui.QCursor()\n new_pos = self.mapFromGlobal(cursor.pos())\n x = new_pos.x()\n y = new_pos.y()\n self.__selector_y = y/float(height) # normalized value of the y position\n \tself.__selector_x = x/float(width) #normalised value of the x position\n self.updatePixelColor()\n self.repaint()", "def mouseMoveEvent(self, e):\n if e.pos().y() == self.offset:\n return\n adder = (self.offset - e.y())\n self.deltacount += adder\n #adder *= self.accelerator\n adder *= (abs(adder) * 0.01)\n #self._state[0] = max(self._min[0], min(self._max[0], self._state[0] + adder))\n QtGui.qApp.emit( QtCore.SIGNAL(\"deltaChanged\"), self, adder)\n #self._param.update()\n QtGui.QCursor.setPos(self.origo)", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def shift(x, row_ind, col_ind, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='constant', cval=0.):\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = row_ind - (h / 2)\n ty = col_ind - (w / 2) \n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n\n transform_matrix = translation_matrix # no need to do offset\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n \n return x", "def mouseMoveEvent(self, ev):\n shift = ev.modifiers() & QtCore.Qt.ShiftModifier\n ctrl = ev.modifiers() & QtCore.Qt.ControlModifier\n if shift:\n y = ev.pos().y()\n if not hasattr(self, '_prev_zoom_pos') or not self._prev_zoom_pos:\n self._prev_zoom_pos = y\n return\n dy = y - self._prev_zoom_pos\n def delta():\n return -dy * 5\n ev.delta = delta\n self._prev_zoom_pos = y\n self.wheelEvent(ev)\n elif ctrl:\n pos = ev.pos().x(), ev.pos().y()\n if not hasattr(self, '_prev_pan_pos') or not self._prev_pan_pos:\n self._prev_pan_pos = pos\n return\n dx = pos[0] - self._prev_pan_pos[0]\n dy = pos[1] - self._prev_pan_pos[1]\n self.pan(dx, dy, 0, relative=True)\n self._prev_pan_pos = pos\n else:\n super(PlotObject, self).mouseMoveEvent(ev)", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except 
ValueError:\n return", "def set_zoombox_keyboard(self):\n # Idem but with CTRL + left button mouse \n self.set('LeftClickMove', 'ZoomBox',\n key_modifier='Control',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)", "def mousePressEvent(self, event):\n self.dragging = True\n self.moved = False\n self.parent.setCursor(QtCore.Qt.ClosedHandCursor)", "def _updateColormapImage(self, *args, **kwargs):\n if self._colormapImage is not None:\n self._colormapImage = None\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def keypress(self, event):\n stepsize = 0.15 # pixels, i.e. 0.05 arcsec\n if event.key == 'right':\n if self.ccd == 2:\n self.shift_crpix1 -= stepsize\n else:\n self.shift_crpix2 += stepsize\n elif event.key == 'left':\n if self.ccd == 2:\n self.shift_crpix1 += stepsize\n else:\n self.shift_crpix2 -= stepsize\n elif event.key == ']':\n if self.ccd == 2:\n self.shift_crpix1 -= 5*stepsize\n else:\n self.shift_crpix2 += 5*stepsize\n elif event.key == '[':\n if self.ccd == 2:\n self.shift_crpix1 += 5*stepsize\n else:\n self.shift_crpix2 -= 5*stepsize\n elif event.key == 'up':\n if self.ccd == 2:\n self.shift_crpix2 += stepsize\n else:\n self.shift_crpix1 += stepsize\n elif event.key == 'down':\n if self.ccd == 2:\n self.shift_crpix2 -= stepsize\n else:\n self.shift_crpix1 -= stepsize\n elif event.key == \"'\":\n if self.ccd == 2:\n self.shift_crpix2 += 5*stepsize\n else:\n self.shift_crpix1 += 5*stepsize\n elif event.key == '/':\n if self.ccd == 2:\n self.shift_crpix2 -= 5*stepsize\n else:\n self.shift_crpix1 -= 5*stepsize\n elif event.key == '1':\n self.shift_cd1_1 += 1e-8\n elif event.key == '2':\n self.shift_cd1_1 -= 1e-8\n elif event.key == '3':\n self.shift_cd1_2 += 1e-8\n elif event.key == '4':\n self.shift_cd1_2 -= 1e-8\n elif event.key == '5':\n self.shift_cd2_1 += 1e-8\n elif event.key == '6':\n self.shift_cd2_1 -= 1e-8\n elif event.key == '7':\n self.shift_cd2_2 += 2e-8\n elif event.key == '8':\n self.shift_cd2_2 -= 2e-8\n elif event.key == 'r': # Reset\n self.shift_crpix1 = 0.0\n self.shift_crpix2 = 0.0\n self.shift_cd1_1 = 0.0\n self.shift_cd1_2 = 0.0\n self.shift_cd2_1 = 0.0\n self.shift_cd2_2 = 0.0\n elif event.key == 'o': # OK, field does not need fix\n self.mark_done()\n elif event.key == 'w': # Write WCS\n self.write() # Save the results to a csv file\n self.mark_done()\n # Now update the plot for the slight change in WCS parameters\n self.update()", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def mouse_middle_down(self):\n pass", "def _hotswap(self, color: 'Color') -> 'Color':\n\n self._space, self._coords = self.CS_MAP[color.space()], color[:]\n return self", "def normalize_cmap(self):\n vmax, 
vmin = np.max(self.values), np.min(self.values)\n self.midpoint = 1 - vmax/(vmax + abs(vmin))\n if self.midpoint > 0.5:\n self.start, self.stop = 0, 0.5 + (1-self.midpoint)\n else:\n self.start, self.stop = 0.5 - self.midpoint, 1", "def change_layer_with_keys(self, event):\n if event.key in (pg.K_w, pg.K_UP):\n index = (LAYERS.index(self.map_state.layer)-1)%len(LAYERS)\n self.layer_select.buttons[index].press()\n elif event.key in (pg.K_s, pg.K_DOWN):\n index = (LAYERS.index(self.map_state.layer)+1)%len(LAYERS)\n self.layer_select.buttons[index].press()", "def c_map(val):\n return int(remap(val, -1, 1, 0, 255))", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def shift_coordinate_grid(self, pm_coord, shift_epoch):\n\n # Replace pixel data / WCS with copy centred on source\n contour_background = ContourCutout(\n self.survey,\n pm_coord,\n self.size,\n band=self.band,\n )\n self.data = contour_background.data\n self.wcs = contour_background.wcs\n \n # Astropy for some reason can't decide on calling this pm_ra or pm_ra_cosdec\n try:\n pm_ra = pm_coord.pm_ra\n except AttributeError as e:\n pm_ra = pm_coord.pm_ra_cosdec\n\n # Update CRVAL coordinates based on propagated proper motion\n orig_pos = SkyCoord(\n ra=self.wcs.wcs.crval[0] * u.deg,\n dec=self.wcs.wcs.crval[1] * u.deg,\n frame='icrs',\n distance=pm_coord.distance,\n pm_ra_cosdec=pm_ra,\n pm_dec=pm_coord.pm_dec,\n obstime=pm_coord.obstime,\n )\n newpos = orig_pos.apply_space_motion(shift_epoch)\n\n self.wcs.wcs.crval = [newpos.ra.deg, newpos.dec.deg]\n\n return", "def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos", "def set_zoombox_keyboard(self):\n self.set('MiddleClickMove', 'ZoomBox',\n # key_modifier='Shift',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def set_zoombox_keyboard(self):\n self.set('MiddleClickMove', 'ZoomBox',\n # key_modifier='Shift',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))", "def SetColorMap(self, colormap, contrast=None, bias=None):\n extra = ''\n if contrast is not None:\n extra += '%f,' % contrast\n if bias is not None:\n extra += '%f,' % bias\n fmt = dict(wid=self.wid,cmap=colormap, extra=extra, suffix=self.suffix)\n command = \"JS9.SetColormap('{cmap}', {extra} {{display:'{wid}{suffix}'}});\".format(**fmt)\n get_ipython().run_cell_magic('javascript', '', command)", "def mouseReleaseEvent(self, ev):\n super(PlotObject, self).mouseReleaseEvent(ev)\n if self._downpos == ev.pos():\n x = ev.pos().x()\n y = ev.pos().y()\n if ev.button() == 2 :\n self.mPosition()\n elif ev.button() == 1:\n x = x - self.width() / 2\n y = y - self.height() / 2\n #self.pan(-x, -y, 0, relative=True)\n 
print(self.opts['center'])\n print(x,y)\n self._prev_zoom_pos = None\n self._prev_pan_pos = None", "def update_ballpos(self,pos):\n if self.options.visualize_switch_xy:\n self.col.set_offsets(pos[:,::-1]) # reverse x-y direction\n else:\n self.col.set_offsets(pos)", "def move3dCursor(p = (0,0,0)):\n bpy.context.scene.cursor_location = p\n # bpy.context.space_data.cursor_location = p", "def _set_cursor(self, enabled):\n if enabled:\n cursor = (backend_tools.Cursors.RESIZE_HORIZONTAL\n if self.direction == 'horizontal' else\n backend_tools.Cursors.RESIZE_VERTICAL)\n else:\n cursor = backend_tools.Cursors.POINTER\n\n self.ax.figure.canvas.set_cursor(cursor)", "def change_map_down(self):\n self.change_map(self.current_map_idx + 1)", "def cla(self):\n self.disable_mouse_rotation()\n Axes.cla(self)\n self.grid(rcParams['axes3d.grid'])", "def _onMoveDown(self, event):\n index = self.colorlist.GetSelection()\n self.graphColors.insert(index + 1, self.graphColors.pop(index))\n self._tupleListToStrings()\n self.colorlist.SetSelection(index + 1)\n self._updateButtons(None)", "def _update_cursor(self) -> None:\n # get the brush size (get a local reference in case another process\n # changes it between the different accesses in this method)\n brush_size = self.brush_size\n # if there is not update, return\n if not self.is_cursor_change:\n return\n # otherwise dequeue the update\n self.is_cursor_change = False\n # make a static border ring for the cursor\n ring = make_ring(brush_size - 1, brush_size)\n cursor = make_cursor(ring, self._brush_border_color)\n # make a circle with the current color\n brush_circle = make_circle(brush_size) - ring\n cursor = cursor + make_cursor(brush_circle, self._color)\n # create the pyglet cursor object and set it\n mouse = pyglet_cursor(cursor)\n self._view.set_cursor(mouse)", "def mouse_middle_up(self):\n pass", "def cursor_placement_thread(self):\r\n while self.editing:\r\n # pylint: disable=W0212\r\n with goxapi.Signal._lock:\r\n curses.curs_set(2)\r\n self.win.touchwin()\r\n self.win.refresh()\r\n time.sleep(0.1)\r\n curses.curs_set(0)", "def switch_to_offsets(self):\n\n cdelt1, cdelt2 = proj_plane_pixel_scales(self.wcs)\n ctype = self.wcs.wcs.ctype\n crpix = self.wcs.wcs_world2pix(self.ra, self.dec, 1)\n\n # Create new WCS as Skymapper does weird things with CDELT\n self.wcs = WCS(naxis=2)\n\n # Centre pixel is offset by 1 due to array indexing convention\n # self.wcs.wcs.crpix = [(len(self.data)) / 2 + 1,\n # (len(self.data)) / 2 + 1]\n self.wcs.wcs.crpix = [crpix[0], crpix[1]]\n self.wcs.wcs.crval = [0, 0]\n self.wcs.wcs.cdelt = [-cdelt1, cdelt2]\n self.wcs.wcs.ctype = ctype\n\n if 'radio' in dir(self):\n r_crpix = self.radio.wcs.wcs_world2pix(self.ra, self.dec, 1)\n # self.radio.wcs.wcs.crpix = [(len(self.radio.data)) / 2 + 1,\n # (len(self.radio.data)) / 2 + 1]\n self.radio.wcs.wcs.crpix = [r_crpix[0], r_crpix[1]]\n self.radio.wcs.wcs.crval = [0, 0]\n\n self.offsets = True", "def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)", "def enableZoomIn(self):\n self.zoomInID = self.canvas.mpl_connect('button_press_event', self.onZoomIn)\n self.master.config(cursor = \"cross\")", "def setPlotShift(x,y):\n dislin.trfshf(x,y)", "def mouseMoveEvent(self, event):\r\n start = QtCore.QPointF(self.mapToScene(self._start))\r\n end = QtCore.QPointF(self.mapToScene(event.pos())) \r\n \r\n w = abs((end.x() - start.x()))\r\n h = abs((end.y() - start.y())) \r\n \r\n x = (start.x() + end.x())/2\r\n y = (start.y() + end.y())/2 \r\n \r\n lcx = x - (w/2)\r\n lcy = y - 
(h/2) \r\n \r\n if self.editRegen:\r\n self._currentBox.setRect(lcx,lcy,w,h)\r\n self.updateSceneDataInCamraSpace()\r\n \r\n if self.itemMovable:\r\n self.updateSceneDataInCamraSpace()\r\n \r\n super(MocapGraphicsView, self).mouseMoveEvent(event)", "def up(self):\r\n self.brush_on = False", "def on_dragg(self, event):\n if str(event.lastevent.button) == \"MouseButton.LEFT\":\n mX = event.xdata\n mY = event.ydata\n if mX and mY:\n if self.current_point is not None:\n self.x[self.current_point] = mX\n self.y[self.current_point] = mY\n self.redraw()", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def _onMoveUp(self, event):\n index = self.colorlist.GetSelection()\n self.graphColors.insert(index - 1, self.graphColors.pop(index))\n self._tupleListToStrings()\n self.colorlist.SetSelection(index - 1)\n self._updateButtons(None)", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def on_mouse_drag(x, y, dx, dy, buttons, modifiers):\n geo = self.geometry\n height = self.window.height - 2 * geo.vertical_margin - 3 -\\\n geo.status_bar_height\n start = geo.horizontal_margin + geo.graph_start_x\n end = self.window.width - 2 * geo.horizontal_margin - \\\n geo.scroll_bar_width - geo.menu_width - 3\n # If the box already exists, update it.\n if self.zoom_box:\n if x < start:\n x = start\n elif x > end:\n x = end\n self.zoom_box.begin_update()\n self.zoom_box.resize(x - self.zoom_start - 2, height - 2)\n self.zoom_box.end_update()\n self.zoom_frame.begin_update()\n self.zoom_frame.resize(x - self.zoom_start, height)\n self.zoom_frame.end_update()\n self.zoom_box_min_x = self.zoom_start\n self.zoom_box_max_x = self.zoom_start + x - self.zoom_start\n self.zoom_box_max_y = self.window.height - geo.vertical_margin\\\n - 3\n 
self.zoom_box_min_y = self.zoom_box_max_y - height\n if self.zoom_box_min_x > self.zoom_box_max_x:\n self.zoom_box_min_x, self.zoom_box_max_x = \\\n self.zoom_box_max_x, self.zoom_box_min_x\n # Otherwise create a new box.\n else:\n self.zoom_start = x\n self.zoom_box = glydget.Rectangle(x + 1, self.window.height -\\\n self.geometry.vertical_margin - 5, 1,\n height - 2, [255,255,255,155,255,255,255,100,\n 255,255,255,200,255,255,255,120])\n self.zoom_frame = glydget.Rectangle(x, self.window.height -\\\n self.geometry.vertical_margin - 3, 1,\n height, (0,0,0,200), filled = False)\n self.zoom_box.build(batch = self.batch, group = self.groups[-1])\n self.zoom_frame.build(batch = self.batch, group = self.groups[-1])\n self.zoom_box_min_x = x\n self.zoom_box_max_x = x+1\n # Push the handlers for the box.\n self.zoom_handlers = \\\n self.win.window.push_handlers(on_mouse_motion, on_mouse_press)", "def blink_cursors(self):\n\n self.cursor_state = not self.cursor_state\n self.update_cursors()", "def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):\n if not self.cancmap:\n return False\n event.accept()\n if event.state == 'down':\n self.restore_contrast(viewer, msg=msg)", "def ToggleCursor(self):\n self.displaycontrol ^= self.LCD_CURSORON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def down(self):\r\n self.brush_on = True", "def set_cursor(self, row, col):\n self._vim.current.window.cursor = (row, col)", "def reset(self):\n self.xview_moveto(0)\n self.yview_moveto(0)\n self.zoomMap(1, 0, 0)", "def on_user_resize_start(self, event):\n # Save offset for use while dragging.\n self._mouse_drag_offset = self.resize_frame.winfo_rootx() - event.x_root", "def keyCam(self, event):\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)", "def change_color_arrow():\n global index_picker\n offset = 0\n if index_picker % 2 == 1:\n offset = 4\n for i in range(3):\n for j in range(2):\n sense.set_pixel(i+1+offset, j, [100, 100, 200])if ARROW[j][i] == 1 else sense.set_pixel(i+1+offset, j, [0, 0, 0])", "def change_cursor(self, cursor):\n self.setCursor(cursor)", "def click(self,x:int=None,y:int=None):\n x = int(x/self.zoom_count)#1.5是缩放比例\n y = int(y/self.zoom_count)\n lParam = win32api.MAKELONG(x, y)\n win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_MOUSEMOVE,wcon.MK_LBUTTON, lParam)\n win32gui.SendMessage(self.ScreenBoardhwnd, wcon.WM_SETCURSOR, self.ScreenBoardhwnd, win32api.MAKELONG(wcon.HTCLIENT, wcon.WM_LBUTTONDOWN))\n # win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_SETCURSOR, 0, 0)\n while (win32api.GetKeyState(wcon.VK_CONTROL) < 0 or\n win32api.GetKeyState(wcon.VK_SHIFT) < 0 or\n win32api.GetKeyState(wcon.VK_MENU) < 0):\n time.sleep(0.005)\n win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_LBUTTONDOWN,\n wcon.MK_LBUTTON, lParam)\n win32gui.PostMessage(self.ScreenBoardhwnd, wcon.WM_LBUTTONUP, 0, lParam)", "def setEditCursor(self, event):\n self.editMode = True\n self.updateCursor(\"X_cursor\")\n self.changeColor(self.lastChanged, self.colors['pentomino'])\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if not (0 <= x < self.rows and 0 <= y < self.cols):\n return\n if not self.gridBusy[x][y]:\n return\n assert len(self.history) >= 
self.gridBusy[x][y]\n self.lastChanged = self.gridBusy[x][y]\n self.changeColor(self.lastChanged, self.colors['pent_edit'])", "def switch(self, _color = 16):\n\t\tself.pointer.flip()\n\n\t\tif self.pointer.get():\n\t\t\tself.content[0][1] = 3\n\t\t\tself.content[1][1] = 16\n\t\telse:\n\t\t\tself.content[0][1] = 16\n\t\t\tself.content[1][1] = 3", "def _force_rescale(self, setpoint_x, setpoint_y):", "def update_zoom_plot(self):\n self.plot_zoom.setXRange(*self.linear_region.getRegion(), padding=0)", "def keyPressEvent(self, event):\n maxIdx = self.dat3d.shape[2] - 1\n minIdx = 0\n if (event.key() == QtCore.Qt.Key_Left) and (self.currentIndex >= minIdx + 1):\n self.currentIndex -= 1\n self.showImage(self.currentIndex)\n elif (event.key() == QtCore.Qt.Key_Right) and (self.currentIndex <= maxIdx - 1):\n self.currentIndex += 1\n self.showImage(self.currentIndex)", "def change_map_up(self):\n if self.current_map_idx > 0:\n self.change_map(self.current_map_idx + 1)", "def __move_to(self, event):\n self.canvas_image.scan_dragto(event.x, event.y, gain=1)\n self.to_coord = (event.x, event.y)\n self.__show_image() # zoom tile and show it on the canvas", "def on_keydown(key):\n global source_img, source_msk\n\n def next_image():\n return False\n\n def increase_shape_size():\n global SHAPE_SIZE\n SHAPE_SIZE = min(64, SHAPE_SIZE+SHAPE_SIZE_INC)\n return True\n\n def decrease_shape_size():\n global SHAPE_SIZE\n SHAPE_SIZE = max(1, SHAPE_SIZE-SHAPE_SIZE_INC)\n return True\n\n def clear_mask():\n global source_msk\n source_msk *= 0\n return True\n\n def display_help():\n global show_help, show_help_timestamp\n show_help_timestamp = datetime.now()\n show_help = True\n return True\n\n def stop_editing():\n raise StopIteration\n\n def set_current_label(value):\n global CURRENT_LABEL\n CURRENT_LABEL = value\n\n def set_mode_point():\n \"\"\"\n default point drawing mode\n press CTRL on mousemove to draw\n \"\"\"\n global DRAW_MODE\n DRAW_MODE=\"point\"\n\n def set_mode_line():\n \"\"\"\n start drawing in line mode\n if already in line mode, commit a line to the mask and start anew\n \"\"\"\n global DRAW_MODE, CURRENT_LABEL, SHAPE_SIZE\n global mouse_pos, line_start_pos\n\n if DRAW_MODE==\"line\":\n # draw the line on the mask\n cv.line(source_msk, line_start_pos, mouse_pos, CURRENT_LABEL, thickness=SHAPE_SIZE)\n\n line_start_pos = mouse_pos\n DRAW_MODE=\"line\"\n\n def flood_fill():\n \"\"\"\n flood fill a region in the mask\n FIXME: we really need undo for this!\n \"\"\"\n global CURRENT_LABEL\n global mouse_pos\n\n im_mask = (source_msk==CURRENT_LABEL).astype(np.uint8)\n cv.floodFill(im_mask, None, mouse_pos, CURRENT_LABEL)\n source_msk[im_mask!=0] = CURRENT_LABEL\n\n # function map\n fns = {\n ord(' '): next_image,\n ord('+'): increase_shape_size,\n ord('-'): decrease_shape_size,\n ord('x'): clear_mask,\n ord('h'): display_help,\n 27: stop_editing,\n ord('0'): lambda: set_current_label(0),\n ord('1'): lambda: set_current_label(1),\n ord('2'): lambda: set_current_label(2),\n ord('3'): lambda: set_current_label(3),\n ord('4'): lambda: set_current_label(4),\n ord('5'): lambda: set_current_label(5),\n ord('6'): lambda: set_current_label(6),\n ord('7'): lambda: set_current_label(7),\n ord('s'): set_mode_line,\n ord('a'): set_mode_point,\n ord('f'): flood_fill\n }\n\n try:\n return fns[key]()\n except KeyError:\n # FIXME: value 255 is not handled, what is 255? 
should we do a noop?\n #logger.warning(\"don't handle '%i'\" % key)\n pass", "def mouseImage(event, x, y, flags, param):\n \n if event==cv.CV_EVENT_LBUTTONDOWN: #Clicked the left button\n print \"x, y are\", x, y\n (b,g,r) = D.image[y,x]\n print \"r,g,b is\", int(r), int(g), int(b)\n (h,s,v) = D.hsv[y,x]\n print \"h,s,v is\", int(h), int(s), int(v)\n D.down_coord = (x,y)\n D.mouse_down = True\n \n elif event==cv.CV_EVENT_LBUTTONUP: #Let go of the left button\n print \"x, y are\", x, y\n (b,g,r) = D.image[y,x]\n print \"r,g,b is\", int(r), int(g), int(b)\n (h,s,v) = D.hsv[y,x]\n print \"h,s,v is\", int(h), int(s), int(v)\n D.up_coord = (x,y)\n D.mouse_down = False\n\n if D.mode == \"clear\":\n D.sections = []\n else: #Start, add, or subtract -- put lower coordinates first\n x0, y0, x1, y1 = D.down_coord[0], D.down_coord[1], D.up_coord[0], D.up_coord[1]\n\n if x0 > x1:\n x0, x1 = x1, x0\n if y0 > y1:\n y0, y1 = y1, y0\n \n if D.mode == \"start\":\n D.sections = []\n mode_dict = {\"start\":'a', \"add\":'a', \"subtract\":'s'}\n D.sections.append([mode_dict[D.mode], (x0, y0), (x1, y1)])\n ImageProcessing.process_section(D)\n\n\n elif event == cv.CV_EVENT_RBUTTONDOWN: #Right click\n D.target_coord = (x, y)\n ImageProcessing.target_coord(D)\n\n\n elif D.mouse_down and event==cv.CV_EVENT_MOUSEMOVE: #Mouse just moved\n D.up_coord = (x,y)", "def create_colormap(seg_map):\n\tcolormap = np.zeros((256, 3), dtype=int)\n\tind = np.arange(256, dtype=int)\n\tfor shift in reversed(range(8)):\n\t\tfor channel in range(3):\n\t\t\tcolormap[:, channel] |= ((ind >> channel) & 1) << shift \n\t\tind >>= 3\n\treturn colormap[seg_map]", "def disableZoomOut(self):\n self.canvas.mpl_disconnect(self.zoomOutID)\n self.master.config(cursor = \"arrow\")", "def setAutoColorResolution(nx, ny):\n dislin.autres(nx,ny)", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()", "def linux_zoomer_minus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def shift(self):\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.size - self.mid_pixel - r\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n image_shift = np.roll(self.image,shift=x,axis=0)\n self.image = np.roll(image_shift,shift=y,axis=1)\n \n return", "def shift(self):\n \"\"\"\n shift cluster randomly within bounds of im\n \"\"\"\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.im_size - self.mid_pixel - r - 10\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n im_shift = np.roll(self.im,shift=y,axis=0)\n self.im = np.roll(im_shift,shift=x,axis=1)\n \n return", "def adjust_hue(image, delta):\r\n image[..., 0] = np.mod(image[..., 0] + delta * 180, 180)\r\n 
return image", "def colorfull_house(self):\n image = self.__image.copy()\n palette_path = self.get_image_path(\"color_palette.png\")\n self.color_palette = cv.resize(cv.imread(palette_path, 1),(598,245))\n self.pixel_picked = np.array([0,0,0])\n\n cv.namedWindow(\"Color Palette\")\n cv.setMouseCallback(\"Color Palette\", self.click_mouse_callback)\n cv.imshow(\"Color Palette\", self.color_palette)\n\n while (True):\n for i in iter(range(self.__rows)):\n for j in iter(range(self.__cols)):\n if self.segmentation_condition(i,j):\n image[i,j] = self.pixel_picked\n \n cv.imshow(\"Colorfull House\",image)\n if (cv.waitKey(1) & 0xFF == ord('q')):\n break", "def clearEditCursor(self, event):\n self.editMode = False\n self.updateCursor(\"arrow\")\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n self.paintBackground(x, y, self.checkFree(x, y))", "def on_cmyk_slide(self,c,m,y,k):\n if not self.active:\n return\n cyan = c / 100.0\n magenta = m / 100.0\n yellow = y / 100.0\n black = k / 100.0\n self.cmyk = colormodel.CMYK(cyan, magenta, yellow, black)\n temp = a3.cmyk_to_rgb(self.cmyk)\n assert (temp == None or type(temp) == colormodel.RGB), 'cmyk_to_rgb does not return a RGB object'\n self.rgb = self.rgb if temp is None else temp\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()", "def onLeftDClick(self, event):\n\n # ignore next Left UP event\n self.ignore_next_up = True\n\n # should ignore double-click off the map, but within view\n # a possible workaround is to limit minimum view level\n\n # get view coords of mouse double click, want same centre afterwards\n (x, y) = event.GetPositionTuple()\n\n if event.ShiftDown():\n # zoom out if shift key also down\n if self.use_level(self.level-1):\n self.zoomOut(x, y)\n else:\n # zoom in\n if self.use_level(self.level+1):\n self.zoomIn(x, y)\n\n self.handleMousePositionCallback((x, y))", "def dnd_commit(self, source, event):\n\n if self._dnd_target and hasattr(source, 'rgb'):\n new_rgb = source.rgb\n if new_rgb != self.rgb:\n self.rgb = new_rgb\n if 'w' in self._mode:\n self.color_var.set(new_rgb)\n self._canvas['cursor'] = self._canvas_cursor", "def shift(self, xOffset, yOffset):\n if xOffset >= self.width or xOffset <= -self.width or \\\n yOffset >= self.height or yOffset <= -self.height:\n # If either offset is greater than the width/height, just clear the\n # entire canvas.\n self.clear()\n return\n\n # Get the appropriate range objects.\n if xOffset > 0:\n xRangeObj = range(self.width - 1 - xOffset, -1, -1)\n else:\n xRangeObj = range(self.width - xOffset)\n\n if yOffset > 0:\n yRangeObj = range(self.height - 1 - yOffset, -1, -1)\n else:\n yRangeObj = range(self.height - yOffset)\n\n for x in xRangeObj:\n for y in yRangeObj:\n self[x + xOffset, y + yOffset] = self[x, y]\n\n # Clear the old, original cells.\n # TODO - this can be made more efficient by not clearing the overlapping regions twice.\n if xOffset >= 0:\n for x in range(xOffset):\n for y in range(self.height):\n del self[x, y]\n else:\n for x in range(self.width - 1 - xOffset, self.width):\n for y in range(self.height):\n del self[x, y]\n\n if yOffset >= 0:\n for x in range(self.width):\n for y in range(yOffset):\n del self[x, y]\n else:\n for x in range(self.width):\n for y in range(self.height - 1 - yOffset):\n del self[x, y]", "def on_mouse_move(self, event):\n if event.is_dragging and event.buttons[0] == 1:\n x0, y0 = 
event.last_event.pos[0], event.last_event.pos[1]\n x1, y1 = event.pos[0], event.pos[1]\n X0, Y0, Z0 = self.pixel_to_coords(float(x0), float(y0))\n X1, Y1, Z1 = self.pixel_to_coords(float(x1), float(y1))\n self.translate_center(X1 - X0, Y1 - Y0, Z1 - Z0)", "def expandColorBarScaling(direction='none'):\n ddict = {'none':'NONE','down':'FIRST','both':'BOTH'}\n dislin.expzlb(ddict[direction])", "def pixel_space(self):\n self.drawer.settransform()\n self.coordspace_bbox = [0, 0, self.width, self.height]\n self.coordspace_transform = (1, 0, 0,\n 0, 1, 0)", "def adjust_visual(self):\n\n if (self.direction is bs.Direction.LEFT):\n self.rect.x -= 0.5 * CELL_SIZE", "def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])", "def handlerSliderLayerTransparencyMoved(self, newPosition):\r\n self.spinBoxLayerTransparency.setValue(newPosition)" ]
[ "0.5790858", "0.57863927", "0.5722508", "0.56208885", "0.5615264", "0.56151456", "0.54330593", "0.54213333", "0.52664816", "0.52410126", "0.5228845", "0.5213258", "0.520547", "0.5198562", "0.5171602", "0.5124414", "0.5097131", "0.5093487", "0.5091165", "0.5089418", "0.50846124", "0.50455993", "0.5041792", "0.50262266", "0.5013308", "0.501157", "0.50101125", "0.4981155", "0.49806538", "0.49777427", "0.4936814", "0.49192968", "0.4917243", "0.49103424", "0.49103424", "0.49064353", "0.49061906", "0.4902981", "0.4900507", "0.4897992", "0.4897655", "0.48968306", "0.48859116", "0.4872", "0.48569572", "0.4842586", "0.48337427", "0.48307508", "0.4821517", "0.4813736", "0.48136312", "0.4807642", "0.47978318", "0.4797398", "0.4787477", "0.47681853", "0.47681186", "0.4762", "0.47487217", "0.4748313", "0.4745713", "0.47433183", "0.47429502", "0.47424775", "0.4741377", "0.47315195", "0.47234246", "0.47192448", "0.47116688", "0.47109753", "0.47083792", "0.47074467", "0.47069067", "0.46995488", "0.46916345", "0.46871102", "0.4676854", "0.46732047", "0.46726766", "0.46723196", "0.46703327", "0.4668997", "0.46677127", "0.46671835", "0.46615365", "0.46588725", "0.46566576", "0.46505356", "0.4639413", "0.46361026", "0.46338066", "0.46318847", "0.4630737", "0.4629406", "0.46282455", "0.46269557", "0.46240127", "0.46228322", "0.46091798", "0.46064696" ]
0.58629936
0
An interactive way to restore the colormap contrast settings after a warp operation.
def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True): if not self.cancmap: return False event.accept() if event.state == 'down': self.restore_contrast(viewer, msg=msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ms_contrast(self, viewer, event, data_x, data_y, msg=True):\n if not self.cancmap:\n return False\n event.accept()\n msg = self.settings.get('msg_contrast', msg)\n\n x, y = self.get_win_xy(viewer)\n\n if event.state == 'move':\n self._tweak_colormap(viewer, x, y, 'preview')\n\n elif event.state == 'down':\n self._start_x, self._start_y = x, y\n if msg:\n self.onscreen_message(\n \"Shift and stretch colormap (drag mouse)\", delay=1.0)\n else:\n self.onscreen_message(None)", "def changeColor(self):\n self.layer.new_colormap()", "def change_contrast(self, b):\n self.fft_plot.set_clim(0, self.contrast_slider.value * self.v_range)\n clear_output()\n display(self.fig)", "def set_contrast(value):\n command([0x21, 0x14, value, 0x20, 0x0c])", "def _on_colormap_change(self, event=None):\n with self.layer.events.colormap.blocker():\n self.colormap_combobox.setCurrentIndex(\n self.colormap_combobox.findData(self.layer.colormap)\n )", "def set_contrast(level):\n send_command(0x81)\n send_command(level)", "def SetColorMap(self, colormap, contrast=None, bias=None):\n extra = ''\n if contrast is not None:\n extra += '%f,' % contrast\n if bias is not None:\n extra += '%f,' % bias\n fmt = dict(wid=self.wid,cmap=colormap, extra=extra, suffix=self.suffix)\n command = \"JS9.SetColormap('{cmap}', {extra} {{display:'{wid}{suffix}'}});\".format(**fmt)\n get_ipython().run_cell_magic('javascript', '', command)", "def test_colormap():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['k', 'w', 'r'],\n controls=[0.0, 0.1, 1.0]),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_kwr.png\")", "def update_colormap(self, to_overlay=None, **kwargs):\n if self._n_overlay >= 1:\n overlay = self._n_overlay - 1 if to_overlay is None else to_overlay\n # Define the colormap data :\n data_lim = self._data_lim[overlay]\n col = np.linspace(data_lim[0], data_lim[1], LUT_LEN)\n self._text2d_data[overlay, ...] 
= Colormap(**kwargs).to_rgba(col)\n self._text2d.set_data(self._text2d_data)\n self.update()", "def test_colormap_discrete():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(colors=['r', 'g', 'b'],\n interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_rgb.png\")", "def image_local_enhance_contrast(image: np.ndarray):\n \n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n image = filters.rank.enhance_contrast(image, morphology.disk(2))\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)", "def _updateColormapImage(self, *args, **kwargs):\n if self._colormapImage is not None:\n self._colormapImage = None\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def set_cmap_cb(self, w, index):\n old_cmap_name = self._cmap_name\n name = cmap.get_names()[index]\n self.cmap_name = name\n self.pipeline.push(StageAction(self,\n dict(cmap_name=old_cmap_name),\n dict(cmap_name=self._cmap_name),\n descr=\"rgbmap / change cmap\"))\n\n self.pipeline.run_from(self)", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour,mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def test_colormap_coolwarm():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap='coolwarm', clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_coolwarm.png\")", "def setColourMap(self):\n cmap = self.config['cmap']\n\n pos, colour, mode = colourMaps.colourMaps(cmap)\n\n cmap = pg.ColorMap(pos, colour, mode)\n self.lut = cmap.getLookupTable(0.0, 1.0, 256)\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n self.colourStart = (self.config['brightness'] / 100.0 * self.config['contrast'] / 100.0) * (\n maxsg - minsg) + minsg\n self.colourEnd = (maxsg - minsg) * (1.0 - self.config['contrast'] / 100.0) + self.colourStart", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0", "def restoreRenderSettings():\n bpy.context.scene.render.engine = cache.values[\"engine\"]\n bpy.context.scene.render.film_transparent = cache.values[\"transparent\"]\n\n bpy.context.scene.render.filepath = cache.values[\"filepath\"]\n bpy.context.scene.render.image_settings.file_format = cache.values[\"format\"]\n bpy.context.scene.render.image_settings.color_mode = cache.values[\"mode\"]\n bpy.context.scene.render.image_settings.color_depth = cache.values[\"depth\"]\n\n bpy.context.scene.render.resolution_x = cache.values[\"resolutionX\"]\n bpy.context.scene.render.resolution_y = cache.values[\"resolutionY\"]\n 
bpy.context.scene.render.resolution_percentage = cache.values[\"percentage\"]\n bpy.context.scene.render.pixel_aspect_x = cache.values[\"aspectX\"]\n bpy.context.scene.render.pixel_aspect_y = cache.values[\"aspectY\"]\n\n if cache.values[\"world\"]:\n bpy.context.scene.world = cache.values[\"world\"]", "def reset_active_settings(self):\n self.compute = yacman.YacAttMap()\n return True", "def unflip_colors(self):\n self.colors[self.bondA] = self.colA\n self.colors[self.bondB] = self.colB\n self.set_bcol(self.bondA)\n self.set_bcol(self.bondB)\n return", "def enhanceContrast(image, mask, target_path, name, save=False):\n \n\n \n # Contrast stretching\n p2, p98 = np.percentile(image, (2, 98))\n image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))\n \n # Equalization\n image_eq = exposure.equalize_hist(image)\n \n # Adaptive Equalization\n image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)\n \n # Display results\n fig = plt.figure(figsize=(19, 13))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image, mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('Low contrast image')\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_rescale, mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('Contrast stretching')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_eq, mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('Histogram equalization')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_adapteq,mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('Adaptive equalization')\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n return image_adapteq", "def resetOpacity(self):\n opa = (0,)\n for i in range(1,256):\n opa += (i,)\n if self._displayPjt:\n self._displayPjt.setOpacityPalette(opa)\n if self._displayUsr:\n self._displayUsr.setOpacityPalette(opa)\n if self._displayVtk:\n self._displayVtk.setOpacityPalette(opa)", "def _adjust_contrast_img(self, results, factor=1.0):\n for key in results.get('img_fields', ['image']):\n img = results[key]\n results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)", "def apply_colormap_on_image(org_im, activation, colormap_name):\n # Get colormap\n color_map = mpl_color_map.get_cmap(colormap_name)\n no_trans_heatmap = color_map(activation)\n # Change alpha channel in colormap to make sure original image is displayed\n heatmap = copy.copy(no_trans_heatmap)\n heatmap[:, :, 3] = 0.4\n heatmap = PILImage.fromarray((heatmap*255).astype(np.uint8))\n no_trans_heatmap = PILImage.fromarray((no_trans_heatmap*255).astype(np.uint8))\n\n # Apply heatmap on iamge\n heatmap_on_image = PILImage.new(\"RGBA\", org_im.size)\n heatmap_on_image = PILImage.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))\n heatmap_on_image = PILImage.alpha_composite(heatmap_on_image, heatmap)\n return no_trans_heatmap, heatmap_on_image", "def contrastSide(trim = True, exclOverlap = False, exclY = True):\n\t\n\tfig 
= plt.figure()\n\ttitle = \"Effect of contrast - exclOverlap = %s - exclY = %s\" % (exclOverlap, exclY)\n\tplt.suptitle(title)\n\t\n\tfor sacc in [\"1\", \"2\", \"3\"]:\n\t\t\n\t\tcolList = [\"#ef2929\", \"#3465a4\",\"#73d216\", \"#f57900\"]\n\t\tplt.subplot(1,3, int(sacc))\n\t\tplt.title(\"sacc = %s\"% (sacc))\n\t\t\n\t\t# Exp 1:\n\t\texp = \"004A\"\n\t\tdm1 = getDM.getDM(exp = exp, driftCorr = True, onlyControl = False)\n\n\t\t# This is the same for corrected landing positions (the saccade\n\t\t# doesn't change; only the reference point does)\n\t\tdm1 = dm1.select(\"endX%sNorm != ''\" % sacc, verbose = False)\n\t\tdm1 = dm1.select(\"endX%sNorm > -.5\" % sacc, verbose = False)\n\t\tdm1 = dm1.select(\"endX%sNorm < .5\" % sacc, verbose = False)\n\t\t\n\t\tif exclY:\n\t\t\tdm1 = dm1.select(\"endY%sNorm != ''\" % sacc)\n\t\t\tdm1 = dm1.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\tdm1 = dm1.select(\"endY%sNorm < .5\" % sacc)\n\t\t\t\n\t\tfor dv in [\"endX%sNorm\" % sacc, \"endX%sCorrNorm\" % sacc]:\n\t\t\t\n\t\t\t#If wanted, trim the data\n\t\t\tif trim:\n\t\t\t\t_dm1 = dm1.selectByStdDev(keys = [\"contrast_side\", \"file\"], dv = dv)\n\n\t\t\t# For experiment 1 there are not enough third fixations anyway,\n\t\t\t# not even when not filtering on-object on the y-axis.\n\t\t\tif exp == \"004A\" and sacc == \"3\":\n\t\t\t\t\n\t\t\t\tcolList = [\"#ef2929\", \"#3465a4\"]\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Get pivot matrix:\n\t\t\tpm = PivotMatrix(_dm1, [\"contrast_side\"], [\"file\"], dv=dv, colsWithin=True)#, xLabels = [\"left\", \"control\", \"right\"])\n\t\t\tcol = colList.pop()\n\t\t\tpm.plot(fig = fig, nLvl1 = 1, colors = [col])\n\n\t\t# Experiment 2 and 3:\n\t\tdv = \"endX%sNorm\" % sacc\n\t\t\n\t\tfor exp in [\"004B\", \"004C\"]:\n\t\t\tif exclY and exp == \"004B\" and sacc == \"3\":\n\t\t\t\tcolList = [\"#ef2929\"]\n\t\t\t \n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif exp == \"004C\" and exclOverlap:\n\t\t\t\tdm = dm.select(\"gap == 'zero'\")\n\t\t\t\n\t\t\tprint \"EXP = \", exp\n\t\t\t\n\t\t\tdm = getDM.getDM(exp = exp, driftCorr = True, onlyControl = False)\n\t\t\t\n\t\t\t# This is the same for corrected landing positions (the saccade\n\t\t\t# doesn't change; only the reference point does)\n\t\t\tdm = dm.select(\"endX%sNorm != ''\" % sacc, verbose = False)\n\t\t\tdm = dm.select(\"endX%sNorm > -.5\" % sacc, verbose = False)\n\t\t\tdm = dm.select(\"endX%sNorm < .5\" % sacc, verbose = False)\n\t\t\t\n\t\t\tif exclY:\n\t\t\t\tdm = dm.select(\"endY%sNorm != ''\" % sacc)\n\t\t\t\tdm = dm.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\t\tdm = dm.select(\"endY%sNorm < .5\" % sacc)\n\n\t\t\t\n\t\t\t#If wanted, trim the data\n\t\t\tif trim:\n\t\t\t\t_dm = dm.selectByStdDev(keys = [\"contrast_side\", \"file\"], dv = dv)\n\t\t\t# Get pivot matrix:\n\t\t\tpm = PivotMatrix(_dm, [\"contrast_side\"], [\"file\"], dv=dv, colsWithin=True)\n\t\t\tcol = colList.pop()\n\t\t\tpm.plot(fig = fig, nLvl1 = 1, colors = [col])\n\t\t\n\t\t# Modify plot:\n\t\tplt.ylim(-.2, .2)\n\t\t\n\t\tplt.legend([\"Exp1 (abs)\", \"Exp1 (corr)\", \"Exp2 (abs)\", \"Exp2 (sim)\"])\n\t\tif sacc == \"3\":\n\t\t\tplt.legend([\"Exp2 (abs)\", \"Exp2 (sim)\"])\n\t\t\tif exclY:\n\t\t\t\tplt.legend([\"Exp2 (sim)\"])\n\t\t\n\t\tplt.axhline(0, color = \"#888a85\", linestyle = \"--\", linewidth = 2)\n\t\n\tplt.savefig(\"%s.png\" % title)", "def popcolor():\r\n _pycolor.setcolor(_colorstack.pop())", "def AutoContrast(img: Image, _: float) -> Image:\n return PIL.ImageOps.autocontrast(img)", "def extreme_contrast(picture: Image) -> Image:\r\n \r\n contrast_pic = 
copy(picture)\r\n for x, y, (r, g, b) in contrast_pic:\r\n if r <= 127:\r\n r = 0\r\n if r >= 128 and r <= 255:\r\n r = 255\r\n if g <= 127:\r\n g = 0\r\n if g >= 128 and g <= 255:\r\n g = 255 \r\n if b <= 127:\r\n b = 0\r\n if b >= 128 and b <= 255:\r\n b = 255 \r\n contrast_color = create_color(r, g, b)\r\n set_color(contrast_pic, x, y, contrast_color)\r\n return contrast_pic", "def reset(self):\n # must NOT reset color map here, otherwise we loose provided configs by user,\n # which are more important in this case for result images vs whatever the model task specified\n self.class_names = None\n self._map = None", "def enhance_contrast(img):\n for y in range(frame_height):\n for x in range(frame_width):\n if img[y, x, 1] > 100:\n # range of blues to limit of puppet motion 255/(frame_width - 150)\n img[y][x][0] = x*0.4\n if img[y, x, 1] <= 100:\n img[y][x][2] = img[y][x][2]*0.5\n cv2.imwrite(\"contrasted.png\", img)", "def contrastear_img(p, r1, r2, s1, s2):\n img = read_img(p)\n funcion_de_constraste = contrast_function_for_points(r1, r2, s1, s2)\n img_contrasteada = contrastear(img.reshape((-1)), funcion_de_constraste)\n show_imgs([img, img_contrasteada.reshape(img.shape)])", "def adjust_contrast(img, contrast_factor):\n check_type(img)\n\n enhancer = ImageEnhance.Contrast(img)\n img = enhancer.enhance(contrast_factor)\n return img", "def btn_contrast_invert_callback(self):\n self.show_as_waiting(True)\n self.image_proc_selected('Contrast Invert')\n self.show_as_waiting(False)", "def getColormapDialog(self, view):\n return None", "def resetPalette(self):\n pal = (0,0,0)\n for i in range(1,256):\n pal += (i,i,i)\n if self._displayPjt:\n self._displayPjt.setColorPalette(pal)\n if self._displayUsr:\n self._displayUsr.setColorPalette(pal)\n if self._displayVtk:\n self._displayVtk.setColorPalette(pal)", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n bm.reset_mode(self.fitsimage)", "def set_contrast_range(self, zmin, zmax):\n self.get_contrast_panel().set_range(zmin, zmax)", "def get_colormap(self):\n return colors.ev_colormaps[self.name]", "def dark(s='dark'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(2)\n camera.status.imgtype = 'DARK'\n camera.status.object = s\n camera.status.update()", "def resetTransformations():\n dislin.trfres()", "def flip_color(self):\n self._spots[constants.CROSSING_LOCATION - 1].flip_color()\n self._spots[constants.CROSSING_LOCATION + constants.NUM_LANES].flip_color()", "def restore_config(self):\n self._clear_previous_windows_assigment()\n self._restart_i3_config()", "def switch_colors(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1][0]\n mutated_genome[index1][0] = mutated_genome[index2][0]\n mutated_genome[index2][0] = temp", "def _propagate_material_settings(self, bm, layer):\n state = layer.state\n\n # Shade Flags\n if not bm.use_mist:\n state.shadeFlags |= hsGMatState.kShadeNoFog # Dead in CWE\n state.shadeFlags |= hsGMatState.kShadeReallyNoFog\n\n if bm.use_shadeless:\n state.shadeFlags |= hsGMatState.kShadeWhite\n\n # Colors\n layer.ambient = utils.color(bpy.context.scene.world.ambient_color)\n layer.preshade = utils.color(bm.diffuse_color)\n layer.runtime = utils.color(bm.diffuse_color)\n layer.specular = utils.color(bm.specular_color)\n\n layer.specularPower = min(100.0, float(bm.specular_hardness))\n layer.LODBias = -1.0 # Seems to be the Plasma default\n\n if bm.emit > 0.0:\n # 
Use the diffuse colour as the emit, scaled by the emit amount\n # (maximum 2.0, so we'll also scale that by 0.5)\n emit_scale = bm.emit * 0.5\n layer.ambient = hsColorRGBA(bm.diffuse_color.r * emit_scale,\n bm.diffuse_color.g * emit_scale,\n bm.diffuse_color.b * emit_scale,\n 1.0)", "def reset_color():\n global CURRENT_COLOR\n CURRENT_COLOR = 0", "def reset_params(self):\n self.blur = -1\n self.closing = -1\n self.thresh = -1", "def lightness_correction(self):\n points = self.color_lookup_table_points\n lightness_max_value = math.sqrt(3 * (255**2))\n deadpool = list()\n for index, point in enumerate(points[0]):\n point = self.get_value_tuple(index)\n lightness = int(math.sqrt(point[0]**2 + point[1]**2 + point[2]**2) * 255 / lightness_max_value)\n if not self.to_dark < lightness < self.to_bright:\n deadpool.append(index)\n self.color_lookup_table_points = (np.delete(points[0], deadpool),\n np.delete(points[1], deadpool),\n np.delete(points[2], deadpool))\n self.point_count = len(self.color_lookup_table_points[0])", "def reset(self):\n rich_obs = super(ColoredCostmapRandomAisleTurnEnv, self).reset()\n obs = rich_obs.costmap.get_data() # pylint: disable=no-member\n obs = np.expand_dims(obs, -1)\n return obs", "def redo_settings(self):\r\n cF.redo_settings()", "def test_contrast_weights_from_regions():\n img = contrast_weights_from_regions()\n # ensure all regions besides the first one are set to zero\n assert( img.get_data()[:,:,:,1:].sum() == 0.0)\n\n # verify the allen atlas is similar (modulo summation) to our weighted version\n allen = datasets.fetch_atlas_allen_2011()\n atlas_ni = image.load_img(allen.rsn28)\n std_atlas_ni = standardize_atlas(atlas_ni)\n assert( img.get_data().sum() == std_atlas_ni .get_data()[:,:,:,0].sum())", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def get_colormap(self):\n return colors.colormaps[self.name]", "def get_colormap(self):\n return colors.colormaps[self.name]", "def getContrastEnhanced(self, theMin, theMax):\n\t\treturn self.lut_display0(self._ndarray, theMin, theMax)", "def setColorDiffuse(*args):", "def __enhance_image(self, img):\n\n blue = self.g.clahe.apply(img[:,:,0])\n green = self.g.clahe.apply(img[:,:,1])\n red = self.g.clahe.apply(img[:,:,2])\n img[:,:,0] = blue\n img[:,:,1] = green\n img[:,:,2] = red\n return img", "def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)", "def _augment(window: np.ndarray,\n reflect_axes: Sequence[int],\n brightness_delta: Optional[float]=None,\n contrast_scale: Optional[float]=None) -> np.ndarray:\n # Do reflections\n for a in reflect_axes:\n np.flip(window, a)\n\n if brightness_delta is not None:\n window += brightness_delta\n\n if contrast_scale is not None:\n window *= contrast_scale\n\n return window", "def restore_export_preset():\n run_mel_command(\"FBXResetExport\")", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def setContrast(self, contrast):\n raise NotImplementedError", "def 
apply_settings(camera):\r\n camera.clear_mode = 0\r\n camera.exp_mode = \"Internal Trigger\"\r\n camera.readout_port = 0\r\n camera.speed_table_index = 0\r\n camera.gain = 1", "def _reset_image(self):\n print(\"Reseting image\")\n print(self.original_cv_image)\n self.working_image = self.original_cv_image\n self.cv_image = self.working_image\n self.set_image()", "def btn_contrast_stretch_callback(self):\n self.show_as_waiting(True)\n self.image_proc_selected('Contrast Stretching')\n self.show_as_waiting(False)", "def restore_default_highlights(bv=None):\n highlight_set(covdb.total_coverage)\n log.log_info(\"Default highlight colors restored\")", "def adjust_contrast(constrast_factor: float) -> Callable:\n return lambda img: TF.adjust_contrast(img, constrast_factor)", "def recolorRC(src,dst):\n b,g,r=cv2.split(src)\n cv2.addWeighted(b,0.5,g,0.5,0,b) #arguements(in order):first src array,a weight applied\n # to array, scnd src array, a weight applied to array\n # a constant added to the result and a destination array\n cv2.merge((b,b,r),dest) #replace b and g with modified b(which has both and g)", "def set_contrast(self, contrast):\n if isinstance(contrast, int):\n contrast = float(contrast)\n\n _lib.caca_set_dither_contrast.argtypes = [_Dither, ctypes.c_float]\n _lib.caca_set_dither_contrast.restype = ctypes.c_int\n\n return _lib.caca_set_dither_contrast(self, contrast)", "def getColormap(self, view):\n return None", "def LCD_contrast(self, contrast):\n self.send_packet('\\x13' + str([contrast]))", "def resetColor(self):\n self.setColor(255, 255, 255 ,255)", "def restoringbeam(self, channel=-1, polarization=-1):\n return _image.image_restoringbeam(self, channel, polarization)", "def shift_brightness_contrast(image, brightness=-100, contrast=300): \n\tdef vect(a):\n\t\tc = contrast\n\t\tb = 100 * brightness\n\t\tres = ((a - 127.5) * c + 127.5) + b\n\t\tif res <0 :\n\t\t\treturn 0\n\t\tif res > 255:\n\t\t\treturn 255\n\t\treturn res\n\t\n\ttransform = np.vectorize(vect)\n\tdata = transform(fromimage(image)).astype(np.uint8)\n\treturn toimage(data)", "def solarize(img, threshold):\n \n for x, y, col in img:\n\n # Invert the values of all RGB components less than 128,\n # leaving components with higher values unchanged.\n\n red, green, blue = col\n\n if red < threshold:\n red = 255 - red\n\n if green < threshold:\n green = 255 - green\n\n if blue < threshold:\n blue = 255 - blue\n\n col = create_color(red, green, blue)\n set_color(img, x, y, col)", "def shutdown(self, disconnect_from_wm: bool = True) -> None:\n self.keep_going = False\n self._kill_producers()\n logging.info(\"Resetting windows to full opacity...\")\n for window in list_mapped_windows():\n window.set_opacity(1)\n if disconnect_from_wm:\n logging.info(\"Disconnecting from display server...\")\n disconnect_display_conn()", "def _apply_style(self):\n for actor in self.clean_actors:\n if settings.SHADER_STYLE != \"cartoon\":\n style = settings.SHADER_STYLE\n else:\n if self.backend: # notebook backend\n print(\n 'Shader style \"cartoon\" cannot be used in a notebook'\n )\n style = \"off\"\n\n try:\n actor.mesh.reverse() # flip normals\n actor.mesh.lighting(style=style)\n\n actor._mesh.reverse()\n actor._mesh.lighting(style=style)\n except AttributeError:\n pass", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def 
reset_camera_clipping_range(self):\n self.ResetCameraClippingRange()", "def test_colormap_discrete_nu():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=Colormap(np.array([[0, .75, 0], [.75, .25, .5]]),\n [0., .25, 1.], interpolation='zero'),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_nu.png\")", "def warmup():\n print camera.CoolerOFF()\n camera.status.update()", "def DarkenBitmap(bmp, caption_colour, new_colour):\r\n\r\n image = bmp.ConvertToImage()\r\n red = caption_colour.Red()/float(new_colour.Red())\r\n green = caption_colour.Green()/float(new_colour.Green())\r\n blue = caption_colour.Blue()/float(new_colour.Blue())\r\n image = image.AdjustChannels(red, green, blue)\r\n return image.ConvertToBitmap()", "def set_contrast(self, contrast):\n if contrast < 0 or contrast > 255:\n raise ValueError('Contrast must be a value from 0 to 255 (inclusive).')\n self.command(SSD1306_SETCONTRAST)\n self.command(contrast)", "def get_colormap(self):\n return file_io.load_viscm_colormap(self.path)", "def get_colormap(self):\n return file_io.load_viscm_colormap(self.path)", "def setSurfaceColors(topcolor=-1,bottomcolor=-1):\n dislin.surclr(topcolor, bottomcolor)", "def stopColorLoop():\n b.set_group(1, 'effect', 'none')\n b.set_group(1, 'bri', 254)\n b.set_group(1, 'hue', 8731)\n b.set_group(1, 'sat', 106)", "def change_saturation_conv(image, value):\n image = rescale_image_0255(image)\n image = change_saturation(image, value)\n return rescale_image_01(image)", "def layer_overlay(foreground, background):\n overlaid = foreground.copy()\n negative_space = np.where(foreground[:, :, 3] == 0)\n\n overlaid[negative_space] = background[negative_space]\n\n overlaid[:, :, 3] = 255\n\n return overlaid", "def _updateColormap(self, nxdata):\n cmap_norm = nxdata.plot_style.signal_scale_type\n if cmap_norm is not None:\n self.defaultColormap().setNormalization(\n 'log' if cmap_norm == 'log' else 'linear')", "def colorspace(im, bw=False, replace_alpha=False, **kwargs):\n im = im.convert_color_profile()\n\n is_grayscale = im.mode in ('L', 'LA')\n new_mode = im.mode\n if is_grayscale or bw:\n new_mode = 'L'\n else:\n new_mode = 'RGB'\n\n if im.transparent:\n if replace_alpha:\n im = im.replace_alpha(color=replace_alpha)\n else:\n new_mode = new_mode + 'A'\n\n if im.mode != new_mode:\n im = im.convert(new_mode)\n\n return im", "def contrast_restoration(im, method, remove_bits=1, blur_sigma=0, **kwargs):\n from .util import imhist\n from .classical import histeq\n from .exact import histeq_exact\n hist = imhist(im)\n degraded = degrade_image(im, remove_bits, blur_sigma)\n return (histeq(degraded, hist, **kwargs), None) if method == 'classic' else \\\n histeq_exact(degraded, hist, method=method, return_fails=True, **kwargs)", "def img_contrast(img):\n\n return img.max()-img.min()", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)", "def adaptiveContrast(image, mask, target_path, name, kernel_sizes, save=False):\n\n transforms = []\n for kernel_size in kernel_sizes:\n image_adapteq = exposure.equalize_adapthist(image, kernel_size=kernel_size, clip_limit=0.03)\n transforms.append(image_adapteq)\n \n # Display 
results\n fig = plt.figure(figsize=(19, 16))\n axes = np.zeros((2, 5), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 5, 1)\n for i in range(1, 5):\n axes[0, i] = fig.add_subplot(2, 5, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 5):\n axes[1, i] = fig.add_subplot(2, 5, 6+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[0], mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('%d' %kernel_sizes[0])\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[1], mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('%d' %kernel_sizes[1])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[2], mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('%d' %kernel_sizes[2])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[3],mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('%d' %kernel_sizes[3])\n \n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[4],mask, mask_cmap, img_cmap,\n axes[:, 4])\n ax_image.set_title('%d' %kernel_sizes[4])\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n\n return image_adapteq", "def OnSetContrast(self, evt=None):\n\t\twith self.playerLock :\n\t\t\t#self.player.video_set_adjust_int( vlc.VideoAdjustOption.Enable, 1 )\n\t\t\tcontrast = self.contrastSlider.GetValue() * 2\n\t\t\tprint( 'new contrast: {}'.format( contrast ) )\n\t\t\tif self.player.video_set_adjust_float( vlc.VideoAdjustOption.Contrast, contrast/100.0 ) == -1:\n\t\t\t\tself.errorDialog(\"Failed to set contrast\")\n\t\t\telse:\n\t\t\t\tself.State.contrast = contrast/2\n\t\t\t\tself.SaveState()", "def reenlightenment(self):\n return self._reenlightenment", "def restore(self):\n\n self.dispersion = self.raw_dispersion\n self.flux = self.raw_flux\n self.flux_err = self.raw_flux_err\n self.reset_mask()", "def enhance_contrast(img):\n # CLAHE (Contrast Limited Adaptive Histogram Equalization)\n clahe = cv2.createCLAHE(clipLimit=3., tileGridSize=(8, 8))\n\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) # convert from BGR to LAB color space\n l, a, b = cv2.split(lab) # split on 3 different channels\n\n l2 = clahe.apply(l) # apply CLAHE to the L-channel\n\n lab = cv2.merge((l2, a, b)) # merge channels\n img2 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) # convert from LAB to BGR\n\n return img2" ]
[ "0.63572615", "0.6100809", "0.5673598", "0.5571644", "0.55231154", "0.551467", "0.547287", "0.5454797", "0.5436428", "0.53191686", "0.52949184", "0.52944916", "0.52695817", "0.5267348", "0.5224302", "0.52170444", "0.5212828", "0.52027863", "0.5189108", "0.5177404", "0.5158237", "0.51526034", "0.5122462", "0.511686", "0.51103014", "0.5106642", "0.5104638", "0.50994325", "0.5090994", "0.50710434", "0.5062269", "0.50081724", "0.49776492", "0.49720633", "0.4966465", "0.4965826", "0.49472967", "0.49268612", "0.4922736", "0.4917202", "0.4894295", "0.48836666", "0.4870975", "0.48523894", "0.48416954", "0.48294023", "0.48210138", "0.4809419", "0.4785852", "0.4769547", "0.47662595", "0.47618854", "0.47618806", "0.47618806", "0.47577566", "0.4753476", "0.4733387", "0.47303945", "0.47292757", "0.47152078", "0.4713041", "0.47065756", "0.47020635", "0.47016427", "0.46947017", "0.46804613", "0.46762794", "0.46696225", "0.46656153", "0.46564066", "0.46466947", "0.46445778", "0.46442133", "0.46436763", "0.46411484", "0.4639434", "0.4639122", "0.46386817", "0.4635227", "0.4633109", "0.46324283", "0.46320885", "0.46290615", "0.4626953", "0.4626953", "0.46246094", "0.46241808", "0.46163425", "0.4614825", "0.46148202", "0.46117663", "0.46104068", "0.46067953", "0.4593913", "0.45908168", "0.4586612", "0.45866108", "0.45833513", "0.4581673", "0.45794046" ]
0.64222914
0
This decorator is meant to decorate management commands. Any exceptions raised in the command's handle method will be logged and reraised.
def log_exceptions(cls):
    class NewClass(cls):
        def handle(self, *args, **options):
            try:
                # Forward the original positional and keyword arguments unchanged.
                super().handle(*args, **options)
            except Exception:
                logger.exception("Management command '{}' failed. Traceback follows: ".format(sys.argv[1]))
                raise

    return NewClass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(login_required=True):\n def decorate(f):\n def wrapper(self, *args):\n try:\n return f(self, *args)\n except ApiError as e:\n log_exception(e)\n raise BackendException('dpbx api error \"%s\"' % (e,))\n except Exception as e:\n log_exception(e)\n log.Error('dpbx code error \"%s\"' % (e,), log.ErrorCode.backend_code_error)\n raise\n\n wrapper.__doc__ = f.__doc__\n return wrapper\n return decorate", "def command(wrapped=None, synonyms=(), helphint=\"\", hidden=False,\n chat_only=False, muc_only=False):\n def decorator(fn):\n fn.is_command = True\n fn.synonyms = synonyms\n fn.helphint = helphint\n fn.hidden = hidden\n fn.chat_only = chat_only\n fn.muc_only = muc_only\n return fn\n if wrapped is None:\n return decorator\n else:\n return decorator(wrapped)", "def command(func):\n\n @wraps(func)\n def decorated(args, **kwargs):\n try:\n func(args, **kwargs)\n\n return True\n\n except KeyboardInterrupt:\n pass\n\n except TeapotError as ex:\n LOGGER.error(ex.msg, *ex.msg_args)\n\n if args.debug:\n LOGGER.debug('\\nTraceback is:\\n' + ''.join(traceback.format_tb(sys.exc_info()[2])))\n\n except Exception as ex:\n if args.debug:\n LOGGER.exception(ex)\n else:\n LOGGER.error(str(ex))\n\n LOGGER.error('teapot execution failed.')\n\n return False\n\n return decorated", "def command_(self, name):\n def decorator(func):\n func.__name__ = name\n return self.command(func)\n return decorator", "def _command(self, *cmd, handler=None):", "def command(self, *args, **kwargs):\n help_group = kwargs.pop(\"group\", None)\n decorator = super(GroupedGroup, self).command(*args, **kwargs)\n\n def wrapper(f):\n cmd = decorator(f)\n cmd.help_group = help_group\n return cmd\n\n return wrapper", "def _handle_exceptions(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except Exception as err:\n logger.exception(\n f\"{type(self).__name__}.{f.__name__}(*{args!r}, **{kwargs!r}) failed\"\n )\n content = self.message.content\n self.reply(f\"Oops, the {content} command encountered a problem: {err!r}\")\n\n wrapper._handle_exceptions = True\n return wrapper", "def __new__(cls, *args, **kwargs):\n obj = super(BaseCommandWithLogger, cls).__new__(cls, *args, **kwargs)\n _initial_handle = obj.handle\n\n def handle(self, *args, **kwargs):\n try:\n result = _initial_handle(*args, **kwargs)\n try:\n return json.dumps(result)\n except TypeError:\n return str(result)\n except Exception as exc:\n # If the exception was raised from a command running on the\n # Django Q cluster, we need to raise it again to mark the task\n # as failed and record the exception in the task result.\n #if hasattr(self, 'scheduling_info'):\n raise\n\n obj.handle = types.MethodType(handle, obj)\n return obj", "def create_from_management_command(\n cls,\n *,\n command_cls: Type[BaseCommand],\n args: Tuple[Any, ...],\n kwargs: Mapping[str, Any],\n ) -> BaseContext:\n\n return cast(\n BaseContext,\n cls.objects.create(\n context_type=\"management-command\",\n context={\n # Get the name of the command in the same way Django does in\n # the call_command utility.\n \"command\": command_cls.__module__.split(\".\")[-1],\n # Include args and kwargs in the request log. This handles\n # most common argument types like file etc. 
For anything not\n # handled by default the user must provide a function that\n # converts the value to a JSON encodeable value.\n \"args\": args,\n \"kwargs\": kwargs,\n },\n ),\n )", "def command_handling(args, log=COMMAND_LOG):\n # Create the Command object\n command = Command(args, None)\n\n # Resume calls are not logged\n if not command.resume:\n u.sys_log_message(command.command.replace('\\\\', '\\\\\\\\'), log_file=log)\n\n return command", "def track_command(func):\n\n def wrapped(*args, **kwargs):\n\n if not _telemetry_enabled():\n # When Telemetry is disabled, call the function immediately and return.\n return func(*args, **kwargs)\n\n telemetry = Telemetry()\n\n exception = None\n return_value = None\n exit_reason = \"success\"\n exit_code = 0\n\n duration_fn = _timer()\n try:\n\n # Execute the function and capture return value. This is returned back by the wrapper\n # First argument of all commands should be the Context\n return_value = func(*args, **kwargs)\n\n except UserException as ex:\n # Capture exception information and re-raise it later so we can first send metrics.\n exception = ex\n exit_code = ex.exit_code\n exit_reason = type(ex).__name__\n\n except Exception as ex:\n exception = ex\n # Standard Unix practice to return exit code 255 on fatal/unhandled exit.\n exit_code = 255\n exit_reason = type(ex).__name__\n\n ctx = Context.get_current_context()\n telemetry.emit(\"commandRun\", {\n # Metric about command's general environment\n \"awsProfileProvided\": bool(ctx.profile),\n \"debugFlagProvided\": bool(ctx.debug),\n \"region\": ctx.region or \"\",\n \"commandName\": ctx.command_path, # Full command path. ex: sam local start-api\n\n # Metric about command's execution characteristics\n \"duration\": duration_fn(),\n \"exitReason\": exit_reason,\n \"exitCode\": exit_code\n })\n\n if exception:\n raise exception # pylint: disable=raising-bad-type\n\n return return_value\n\n return wrapped", "def group(self, *args, **kwargs):\n def decorator(f):\n cmd = group( *args, **kwargs )( f )\n self.add_command(cmd)\n return cmd\n return decorator", "def standard_error_handler(error_function):\n\n async def wrapper(cls, ctx, error):\n\n extra = f\"\\n\\nSee the help message for more information.\"\n\n # This prevents any commands with local handlers being handled here\n if hasattr(ctx.command, \"on_error\"):\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n\n ignored = (commands.CommandNotFound,)\n\n # Anything in ignored will return and prevent anything happening.\n if any([isinstance(error, i) for i in ignored]):\n return\n\n if isinstance(error, DisabledCommand):\n await pretty_print(\n ctx, \"This command is disabled!\", title=\"Error\", color=ERROR_COLOR\n )\n\n elif isinstance(error, MemberNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, RoleNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, NoPrivateMessage):\n await pretty_print(\n ctx,\n \"This command cannot be run in a private message.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, PrivateMessageOnly):\n try:\n await ctx.message.delete()\n extra += \"\\nYour message has been deleted\"\n except:\n print(\"Could not delete message\")\n await pretty_print(\n ctx,\n \"This command should be run in a Private Message only!\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRole):\n await pretty_print(\n ctx, str(error) + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, IllegalRole):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, CheckFailure):\n await pretty_print(\n ctx,\n \"Could not run command, do you have sufficient permissions in this channel?\"\n + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, BadArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Could not run command, is it formatted properly?\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRequiredArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx, \"Missing required arguments\", title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, BadUnionArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Invalid argument\",\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, WalletNotVerified):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, InvalidCoin):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, RequestError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n elif isinstance(error, FatalError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n await error_function(cls, ctx, error)\n\n return wrapper", "async def admin(ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid Command\")", "def handler(self, command, args=[]):\n ###\n # command parsing and handling logic to be implemented by child\n ###\n if not command and not hasattr(self, 'handle_'):\n return f'Service {str(self.__class__.__name__)}: {self.__doc__ or \"\"}'\n methodname = 'handle_{}'.format(command or '')\n logger.info('method name: {}'.format(methodname))\n logger.info('args: {}'.format(args))\n method = 
self.__getattribute__(methodname)\n return method(args)", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def self_decorator(self, func):\n # TODO: Any other ways to pass variables to handlers?\n def command_func(update, context, *args, **kwargs):\n return func(self, update, context, *args, **kwargs)\n return command_func", "def command(func: 'function') -> 'function':\n func._decorators = (Bot.command,)\n return func", "def command(\n self,\n name: str,\n aliases: list[str] | None = None,\n *,\n subtype: str | None = None,\n short_help: str | None = None,\n help: str | None = None,\n use_shlex: bool = True,\n ) -> DECORATOR_TYPE:\n\n def decorator(target: DECORATOR_ARGS_TYPE) -> Handler:\n handler = get_handler(target)\n\n self.apps.append(\n App(\n \"message\",\n subtype,\n handler,\n name=name,\n aliases=aliases,\n short_help=short_help,\n help=help,\n is_command=True,\n use_shlex=use_shlex,\n ),\n )\n\n return handler\n\n return decorator", "def command(\n self,\n handler: Handler = None,\n *,\n name: str = None,\n aliases: Sequence[str] = (),\n help_text: str = None,\n ) -> CommandProxy:\n\n def inner(func: Handler) -> CommandProxy:\n kwargs = {\"aliases\": aliases}\n\n help_text_ = help_text or func.__doc__\n if help_text_:\n kwargs[\"help\"] = help_text_.strip()\n\n name_ = name or func.__name__\n if asyncio.iscoroutinefunction(func):\n proxy = AsyncCommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n else:\n proxy = CommandProxy(\n func, self._sub_parsers.add_parser(name_, **kwargs)\n )\n\n self._add_handler(proxy, name_, aliases)\n\n return proxy\n\n return inner(handler) if handler else inner", "async def on_command_error(self, ctx: Context, e: errors.CommandError) -> None:\n command = ctx.command\n\n if hasattr(e, \"handled\"):\n log.trace(f\"Command {command} had its error already handled locally; ignoring.\")\n return\n\n debug_message = (\n f\"Command {command} invoked by {ctx.message.author} with error \"\n f\"{e.__class__.__name__}: {e}\"\n )\n\n if isinstance(e, errors.CommandNotFound) and not getattr(ctx, \"invoked_from_error_handler\", False):\n if await self.try_silence(ctx):\n return\n if await self.try_run_fixed_codeblock(ctx):\n return\n await self.try_get_tag(ctx) # Try to look for a tag with the command's name\n elif isinstance(e, errors.UserInputError):\n log.debug(debug_message)\n await self.handle_user_input_error(ctx, e)\n elif isinstance(e, errors.CheckFailure):\n log.debug(debug_message)\n await self.handle_check_failure(ctx, e)\n elif isinstance(e, errors.CommandOnCooldown | errors.MaxConcurrencyReached):\n log.debug(debug_message)\n await ctx.send(e)\n elif isinstance(e, errors.CommandInvokeError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n elif isinstance(e.original, LockedResourceError):\n await ctx.send(f\"{e.original} Please wait for it to finish and try again later.\")\n elif isinstance(e.original, InvalidInfractedUserError):\n await ctx.send(f\"Cannot infract that user. 
{e.original.reason}\")\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.ConversionError):\n if isinstance(e.original, ResponseCodeError):\n await self.handle_api_error(ctx, e.original)\n else:\n await self.handle_unexpected_error(ctx, e.original)\n elif isinstance(e, errors.DisabledCommand):\n log.debug(debug_message)\n else:\n # ExtensionError\n await self.handle_unexpected_error(ctx, e)", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n\n if hasattr(ctx.command, 'on_command_error'):\n return\n\n # This prevents any cogs with an overwritten cog_command_error being handled here.\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n return\n\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(f'Command pas trouvé')\n return\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{ctx.command} has been disabled.')\n return\n\n if isinstance(error,commands.errors.PrivateMessageOnly):\n await ctx.message.delete()\n channel = await ctx.message.author.create_dm()\n await channel.send(f'{ctx.command} ne peut être exécuté que en message privé !!')\n return\n # For this error example we check to see where it came from...\n if isinstance(error, commands.BadArgument):\n await ctx.send('Mauvais arguments passés')\n return\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('Il manque des arguments à la commande')\n return\n # All other Errors not returned come here. And we can just print the default TraceBack.\n logger.error(f'Ignoring exception in command {ctx.command} : {type(error)} {error} {error.__traceback__}')", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def _handle(self, *args, **options):\n return super()._handle(*args, **options)", "def wrapper(callback):\n self.commands[name] = SlashCommand(callback, name, description, options, guild_ids=guild_ids, default_permission=default_permission, guild_permissions=guild_permissions)", "def additional_command(self):\n pass", "async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ):\n log.debug(\"The Error Handler was invoked to handle an error\")\n\n trace = \"\".join(\n traceback.format_exception(type(error), error, error.__traceback__)\n )\n trace = trace.strip()\n\n if hasattr(ctx.command, \"on_error\"):\n log.debug(\"Invoked, but will not override command's own error handler\")\n return\n\n cog = ctx.cog\n if cog:\n if cog._get_overridden_method(cog.cog_command_error) is not None:\n log.debug(\"Invoked, but will not override cog's own error handler\")\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. 
We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n ignored = (commands.CommandNotFound,)\n\n if isinstance(error, ignored):\n log.debug(f\"Ignored exception {type(error)} - {error}\")\n return\n\n # Check for specific exceptions to be handled\n if isinstance(error, commands.DisabledCommand):\n await ctx.send(f\"{ctx.command} has been disabled.\")\n\n elif isinstance(error, commands.CommandOnCooldown):\n try:\n await ctx.send(str(error))\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.author.send(\n f\"{ctx.command} can not be used in Private Messages.\"\n )\n except discord.HTTPException:\n pass\n\n elif isinstance(error, commands.errors.CheckFailure):\n log.debug(f\"A command was called, but a check failed. Trace: \\n{trace}\")\n\n elif isinstance(error, commands.MissingRequiredArgument):\n log.debug(f\"A command was missing a required argument. Trace: \\n{trace}\")\n try:\n await ctx.send(\"```\\nUsage:\\n\" + ctx.command.help + \"```\")\n except discord.HTTPException:\n pass\n\n elif isinstance(error, merrors.MiltonInputError):\n # Send feedback to user\n try:\n await ctx.send(error.msg)\n except discord.HTTPException:\n pass\n\n else:\n # All other Errors not returned come here.\n # Skip the prompt line\n if \"CommandInterface\" in self.bot.cogs:\n print(\"\")\n\n log.error(f\"Ignoring exception in command {ctx.command}:\\n\" f\"{trace}\")\n\n # Re-print the handle for the CLI cog\n if \"CommandInterface\" in self.bot.cogs:\n print(\">> \", end=\"\")", "def command(self):\n raise NotImplementedError", "def command(self, *commands):\n def decorator(function):\n for command in commands:\n self.functions[command] = function\n return function\n return decorator", "def command(name):\n def _decoration(fcn):\n fcn.command = name\n return fcn\n return _decoration", "def command(ctx):\n ctx.setup_logger(format='')", "def _handler(self, bot, update, *args, **kwargs):\n raise NotImplementedError('Not implemented command handler method.')", "def do_command(self, args):\n pass", "def _capture_usage(ctx, *args, **kwargs):\n # protect against early cli failures\n if not ctx.obj or 'config' not in ctx.obj:\n return func(ctx, *args, **kwargs)\n\n config = ctx.obj['config']\n\n # return early if they opted out of sending usage stats\n if hasattr(config, \"collect_analytics\") and not config.collect_analytics:\n return func(ctx, *args, **kwargs)\n\n # add a default username if user is not logged in\n username = \"unknown\"\n if hasattr(config, \"username\"):\n username = config.username\n\n # log payload as a dict\n data = {\n \"channel\": \"cli\",\n \"level\": \"info\",\n \"username\": username,\n \"command\": ctx.command.name,\n \"params\": ctx.params,\n \"platform\": \"{}-{}\".format(platform.system(), platform.release()),\n \"version\": two1.TWO1_VERSION\n }\n\n # send usage payload to the logging server\n requests.post(two1.TWO1_LOGGER_SERVER + \"/logs\", jsonlib.dumps(data))\n\n try:\n # call decorated function and propigate args\n return func(ctx, *args, **kwargs)\n\n # Don't log UnloggedExceptions to the server\n except exceptions.UnloggedException:\n raise\n\n except Exception as ex:\n # protect against early cli failures\n if not ctx.obj or 'config' not in ctx.obj:\n raise ex\n\n # elevate the level to 'error'\n data['level'] = 'error'\n data['exception'] = traceback.format_exc()\n\n # add json data and message from a Two1Error to the data payload\n if isinstance(ex, 
exceptions.Two1Error) and hasattr(ex, \"_json\"):\n data['json'] = ex._json\n data['message'] = ex._msg\n\n # send usage payload to the logging server\n requests.post(two1.TWO1_LOGGER_SERVER + \"/logs\", jsonlib.dumps(data))\n\n raise ex", "async def on_command_error(self, ctx, error):\n\n # This prevents any commands with local handlers being handled here in on_command_error.\n if hasattr(ctx.command, 'on_error'):\n return\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, 'original', error)\n\n # Anything in ignored will return and prevent anything happening.\n if isinstance(error, ignored):\n return\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(f'{self.bot.settings.prefix}{ctx.command} has been disabled.')\n return\n\n elif isinstance(error, commands.NoPrivateMessage):\n try:\n await ctx.channel.send(f'{self.bot.settings.prefix}{ctx.command} can not be used in Private Messages.')\n except:\n pass\n return\n\n elif isinstance(error, commands.BadArgument):\n await ctx.send(f'Refer to.{self.bot.settings.prefix}help {ctx.command}')\n return\n\n elif isinstance(error, commands.BotMissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'I need the **{fmt}** permission(s) to run this command.')\n return\n\n if isinstance(error, commands.MissingPermissions):\n missing = [perm.replace('_', ' ').replace('guild', 'server').title() for perm in error.missing_perms]\n if len(missing) > 2:\n fmt = '{}, and {}'.format(\"**, **\".join(missing[:-1]), missing[-1])\n else:\n fmt = ' and '.join(missing)\n await ctx.send(f'You need the **{fmt}** permission(s) to use this command.')\n return\n\n # All other Errors not returned come here... 
And we can just print the default TraceBack.\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "def setup(bot: commands.Bot) -> None:\n bot.add_cog(CommandErrorHandler(bot))", "def _setup_command(self):\r\n raise NotImplementedError", "def handle_command(log, writer, data):\n\n response = 'BAD: Invalid Command'\n commandList = data.split()\n\n try:\n if commandList[0] == 'expose':\n if len(commandList) == 3:\n if commandList[1] == 'light' or commandList[1] == 'dark' or commandList[1] == 'flat':\n expType = commandList[1]\n expTime = commandList[2]\n try:\n float(expTime)\n if float(expTime) > 0: \n expTime = float(expTime)\n fileName = exposure(expType, expTime)\n response = 'OK\\n'+'FILENAME = '+fileName\n else:\n response = 'BAD: Invalid Exposure Time'\n except ValueError:\n response = 'BAD: Invalid Exposure Time'\n elif len(commandList) == 2:\n if commandList[1] == 'bias':\n expType = commandList[1]\n try: \n fileName = exposure(expType, 0.0)\n response = 'OK\\n'+'FILENAME: '+fileName\n except ValueError:\n response = 'BAD: Invalid Exposure Time'\n elif commandList[0] == 'set':\n if len(commandList) >= 1:\n response = setParams(commandList[1:])\n except IndexError:\n response = 'BAD: Invalid Command'\n \n # tell the client the result of their command & log it\n #log.info('RESPONSE = '+response)\n #writer.write((response+'\\n---------------------------------------------------\\n').encode('utf-8'))\n writer.write((response+'\\nDONE\\n').encode('utf-8'))", "def route(self, command):\n\n def _route(func):\n self._command_hash_views[command] = func\n\n def __route(*args, **kwargs):\n return func(*args, **kwargs)\n\n return __route\n\n return _route", "def management_command(args):\n\n arguments = docopt(management_command.__doc__, args[1:])\n\n try:\n if arguments['version']:\n print cloudmesh.__version__\n elif arguments['user'] and arguments['list']:\n user = Users()\n disp_fmt = None\n user_name = None\n if arguments['--format']:\n disp_fmt = arguments['--format']\n if arguments['USERNAME']:\n user_name = arguments['USERNAME']\n user.list_users(disp_fmt, user_name)\n elif arguments['user'] and arguments['generate']:\n if arguments['--count']:\n count = int(arguments['--count'])\n generate_users(count)\n else:\n generate_users(10)\n elif arguments['user'] and arguments['clear']:\n user = Users()\n user.clear()\n elif arguments['user'] and arguments['delete']:\n if arguments['USERNAME']:\n user = Users()\n user.delete_user(arguments['USERNAME'])\n else:\n print \"Error: Please specify a user to be removed.\"\n elif arguments['user'] and arguments['approve']:\n if arguments['USERNAME']:\n user = Users()\n user.amend_user_status(arguments['USERNAME'], status='approved')\n elif arguments['user'] and arguments['deny']:\n if arguments['USERNAME']:\n user = Users()\n user.amend_user_status(arguments['USERNAME'], status='denied')\n elif arguments['project']:\n print \"Dummy Projects\"\n project_fields()\n elif arguments['list']:\n print \"Listing Users\"\n except:\n print \"Invalid arguments Exception\", sys.exc_info()[0]\n raise", "def setting_handler(setting_name):\n def wrap(command_function):\n if isinstance(setting_name, basestring):\n local_executer.SETTING_FUNCTIONS[setting_name] = command_function\n elif isinstance(setting_name, tuple):\n for name in setting_name:\n local_executer.SETTING_FUNCTIONS[name] = command_function\n else:\n raise Exception('Invalid use of 
\"local_command\" decorator')\n \n #We don't really want to decorate this function.\n return command_function\n \n return wrap", "def middleware(self, *args, **kwargs):\n return super(Blueprint, self).middleware(*args, **kwargs)", "def execute(self, *args, **options):\r\n context = {\r\n 'command': self.prog_name,\r\n }\r\n COMMAND_CONTEXT_NAME = 'edx.mgmt.command'\r\n with tracker.get_tracker().context(COMMAND_CONTEXT_NAME, context):\r\n super(TrackedCommand, self).execute(*args, **options)", "async def _run_command(self, command, *args, **kwargs):\n pass", "def command():\n pass", "def generic_command(replace_name: Optional[str] = None,\n default_parser: Callable[[Any], str] = default_parser,\n arg_parsers: Optional[Dict[str, Callable[[Any], str]]] = None,\n ignore_args: Optional[List[str]] = None,\n add_class_name: bool = True):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return_value = func(*args, **kwargs)\n\n # Resolve parameters\n nonlocal ignore_args\n ignore_args = (ignore_args or []) + DEFAULT_IGNORE_ARGS\n\n unparsed_kwargs = {}\n if args or kwargs:\n arg_names = func.__code__.co_varnames\n # update with args\n unparsed_kwargs = {key: value for key, value in zip(arg_names[:len(args)], args) if value is not None}\n # update with kwargs\n unparsed_kwargs.update(dict(\n sorted(\n {k: v for k, v in kwargs.items() if v is not None}.items(),\n key=lambda items: arg_names[len(args):].index(items[0]))))\n\n command = \"\"\n\n if add_class_name:\n class_ = utils.get_defining_class(func)\n if class_:\n command += utils.pascal_to_snake_case(class_.__name__).strip('_').replace('_', '-') + ' '\n\n # Set command name\n final_name = func.__name__.strip('_').replace('_', '-')\n if replace_name is not None:\n final_name = replace_name.format(__name__=final_name, **unparsed_kwargs)\n if final_name:\n command += final_name + ' '\n\n default_arg_parser = utils.get_arg_default(generic_command, \"default_parser\")\n final_kwargs = {}\n for key, value in unparsed_kwargs.items():\n # Ignore argument\n if ignore_args and key in ignore_args:\n continue\n\n manual_arg_parser = arg_parsers[key] if arg_parsers and arg_parsers.get(key) else lambda x: None\n final_kwargs[key] = manual_arg_parser(value) or default_parser(value) or default_arg_parser(value)\n\n # Set arguments\n command += ' '.join(final_kwargs.values())\n\n commands.append(command.strip())\n return return_value\n\n return wrapper\n return decorator", "def execute_command(self):\n raise Exception(\"Not implemented\")", "def command(name: str,\n *,\n children: Optional[List[_Command]] = None,\n **kwargs) -> _Decorator:\n if children is None:\n children = []\n\n def decorator(func: _MaybeCommand) -> _Command:\n func = _ensure_command(func)\n if func._name is not None:\n raise CLIError(\"@command should only be used once per function\")\n func._name = name\n func._kwargs = kwargs\n func._set_children(children)\n\n return func\n\n return decorator", "def command(cls, name=None):\n postfix = name\n def decorator(method):\n if postfix is None:\n name = method.__name__\n else:\n name = postfix\n mod = method.__module__\n if mod.startswith('scripts.commands'):\n mod = mod[len('scripts.commands'):]\n mod = mod.lstrip('.')\n if mod == '__main__':\n full_name = name\n else:\n full_name = mod+'.'+name\n\n app = cls\n subcmds = cls.subcommands\n for sub in full_name.split('.')[:-1]:\n if sub not in subcmds:\n sub_app = type(sub+'App', (cli.Application,),{})\n sub_app = app.subcommand(sub)(sub_app)\n subcmds[sub] = 
(sub_app, {})\n else:\n pass\n\n app, subcmds = subcmds[sub]\n app.__doc__ = importlib.import_module(method.__module__).__doc__\n\n signature = inspect.signature(method)\n arguments = []\n for (arg_name, param) in signature.parameters.items():\n tp = param.annotation\n if isinstance(tp, Option) or isinstance(tp, Flag):\n if tp._names:\n names = tp._names\n else:\n names = ['-'+arg_name[0], '--'+arg_name]\n arguments.append([tp, arg_name, names, param.default, tp._doc])\n\n def main(self, *args):\n kw_args = {}\n for tp, name, _, _, _ in arguments:\n kw_args[name] = getattr(self, name)\n method(*args, **kw_args)\n\n newclass = type(name+'App', (cli.Application,), {\"main\": main})\n newclass.__doc__ = method.__doc__\n newclass = app.subcommand(name)(newclass)\n\n for tp, name, names, default, doc in arguments:\n if isinstance(tp, Option):\n setattr(newclass, name, cli.SwitchAttr(names, default=default, help=doc))\n elif isinstance(tp, Flag):\n setattr(newclass, name, cli.Flag(names, help=doc))\n return method\n\n return decorator", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "async def hockey_commands(self, ctx: commands.Context) -> None:\n pass", "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def error(self, msg):\n from django.core.management import CommandError\n\n raise CommandError(msg)", "def _command_wrapper(self, cls):\n def command_send(func, args):\n self._con.send(func(*args))\n result = self._recv()\n if len(result) == 0: # Reconnect if no results were received\n self._con.connect()\n self._recv()\n return command_send(func, args)\n parsed_result = Parser.parse(result)\n if not parsed_result:\n raise MPDCommandError(\"Wrong command usage or insufficient permissions: {}\".format(result))\n return parsed_result\n\n class Wrapper(cls):\n \"\"\"\n The actual class that wraps Commands\n It generates new functions to each of Commands attributes, and all it does is to wrap\n Commands attributes with command_send.\n \"\"\"\n def __init__(self):\n pass\n\n def __getattr__(self, item):\n not_wrapped = cls.__getattr__(item)\n\n def f(*args):\n return command_send(not_wrapped, args)\n\n f.__doc__ = not_wrapped.__doc__\n f.__name__ = not_wrapped.__name__\n return f\n\n return Wrapper()", "def manage(ctx, user_cmd):\n ScalingoInterface(ctx.obj).manage_py(user_cmd)", "def cmd_not_found(special_handlers) -> callable:\n\n def cmd_error_decorator(func):\n special_handlers.update({\"cmd_not_found\": func})\n return func\n\n return cmd_error_decorator", "async def on_command_error(\n self,\n ctx: commands.Context,\n error: commands.CommandError\n ):\n # Skips errors that were already handled locally.\n if getattr(ctx, 'handled', False):\n return\n\n if isinstance(error, commands.NoPrivateMessage):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Command Can Not Be Used In Direct Messages`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.TooManyArguments):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. 
Reason:\\n`Passed In Too Many Arguments`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.NSFWChannelRequired):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`This Channel Is Not NSFW`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.CommandNotFound):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Not Found`', #Todo - Possibly remove this\n color=0xFF0000 #Because its kinda annoying ngl\n )\n await ctx.send(embed=embed)\n \n elif isinstance(error, discord.Forbidden):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Discord Is Restricting Command Execution`',\n color=0xFF0000\n )\n embed.add_field(\n name='Possiblities',\n value='`You Are Trying To Use This Command On Someone Who Is Higher Than Either The Bot Or You`',\n inline=True\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.MissingRequiredArgument):\n embed = discord.Embed(\n title='Oops!',\n description=f'Command Failed To Execute. Reason:\\n`Missing Required Argument:`\\n`{error.param.name}`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif (\n isinstance(error, commands.NotOwner)\n or isinstance(error, commands.MissingPermissions)\n ):\n embed = discord.Embed(\n title='Oops',\n description='Command Failed To Execute. Reason:\\n`Missing Permissions`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif (\n isinstance(error, commands.CommandOnCooldown)\n or isinstance(error, commands.CheckFailure)\n ):\n embed = discord.Embed(\n title='Oops',\n description='Command Failed To Execute. Reason\\n```{error}```',\n color=0xFF0000\n ) \n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.DisabledCommand): #SoonTM\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Command Is Disabled`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.BadArgument):\n embed = discord.Embed(\n title='Oops!',\n description=f'Command Failed To Execute. Reason:\\n`Bad Argument`\\n```{error}```',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.BotMissingPermissions):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`Bot Is Missing Permissions`',\n color=0xFF0000\n )\n await ctx.send(embed=embed)\n log.error(\n f'{ctx.command.qualified_name} cannot be executed because the '\n f'bot is missing the following permissions: '\n f'{\", \".join(error.list)}'\n )\n\n elif isinstance(error, commands.CommandInvokeError):\n embed = discord.Embed(\n title='Oops!',\n description='Command Failed To Execute. Reason:\\n`INTERNAL ERROR`',\n color=0xFF0000 \n )\n embed.set_footer(text='Please Contact Tylerr#6979 For Help')\n await ctx.send(embed=embed)\n log.error(\n f'{ctx.command.qualified_name} failed to execute. ',\n exc_info=error.original\n )", "def handle_warning(self, api, command):\n return self.handle_log(api, command, level=logging.WARNING)", "def handleExceptionsDecorator(object):\n\n\t\t@functools.wraps(object)\n\t\tdef handleExceptionsWrapper(*args, **kwargs):\n\t\t\t\"\"\"\n\t\t\tThis decorator is used for exceptions handling.\n\n\t\t\t:param \\*args: Arguments. ( \\* )\n\t\t\t:param \\*\\*kwargs: Keywords arguments. 
( \\*\\* )\n\t\t\t\"\"\"\n\n\t\t\t_exceptions__frame__ = True\n\n\t\t\ttry:\n\t\t\t\treturn object(*args, **kwargs)\n\t\t\texcept exceptions as error:\n\t\t\t\tfor handler in handlers:\n\t\t\t\t\thandler(error)\n\n\t\treturn handleExceptionsWrapper", "def role_command():", "def command(self, function=None, prefix=None):\n def _command(func):\n captured_f = self.capture(func, prefix=prefix)\n self.commands[func.__name__] = captured_f\n return captured_f\n\n if function is not None:\n return _command(function)\n else:\n return _command", "async def custom(self, ctx):\n if ctx.invoked_subcommand is None:\n raise commands.CommandNotFound(\"Subcommand '{}' does not exist.\".format(ctx.subcommand_passed))", "def on_commands(\n self,\n commands,\n priority=0,\n ):\n\n def decorator(coro):\n router = CommandsRouter(priority=priority)\n for command in commands:\n router.add_handler(command, coro)\n\n self._routers.append(router)\n\n return coro\n\n return decorator", "def register_command(*parse_args, **options):\n def wrapper(function):\n function._is_command = True\n return function\n return wrapper", "def command(syntax, namespace=None):\n\n def command_wrapper(f):\n\n @wraps(f)\n def _wrapper(*args, **kwargs):\n return f(*args, **kwargs)\n\n _command(f, _wrapper, syntax, namespace)\n return _wrapper\n return command_wrapper", "def custom(self, command):\n self.command.append(command)\n return self", "async def admin(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole admin\")", "def handle_admincommands(bot, ievent):\n cmnds = getcmndtable()\n if not ievent.rest: ievent.reply(\"commands: \", cmnds)\n else:\n try: ievent.reply(\"%s command is found in %s \" % (ievent.rest, cmnds[ievent.rest]))\n except KeyError: ievent.reply(\"no such commands available\")", "async def _admin(self, ctx: commands.Context):\n await ctx.send_help()", "def command(self, func=None, name=None, usage=None, help_text=None,\n options=None):\n def decorator(func):\n def method_proxy(*a, **kw):\n return func(*a, **kw)\n if not callable(func):\n raise ValueError(\"function must be callable!\")\n _name = name or func.__name__\n method_proxy.name = _name\n method_proxy.__name__ = _name\n method_proxy.usage = \\\n usage if usage is not None else \"\"\n method_proxy.help_text = \\\n help_text if help_text is not None else func.__doc__\n method_proxy.options = options\n self._commands[_name] = method_proxy\n return func\n\n # If func is not None, it means the method\n # has been used either as a simple decorator\n # or as a plain method. The function will be\n # registered by calling decorator, otherwise,\n # decorator will be returned.\n if func:\n return decorator(func)\n return decorator", "def command_entry_point(function):\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n \"\"\" Do housekeeping tasks and execute the wrapped method. 
\"\"\"\n\n try:\n logging.basicConfig(format='%(name)s: %(message)s',\n level=logging.WARNING,\n stream=sys.stdout)\n # This hack to get the executable name as %(name).\n logging.getLogger().name = os.path.basename(sys.argv[0])\n return function(*args, **kwargs)\n except KeyboardInterrupt:\n logging.warning('Keyboard interrupt')\n return 130 # Signal received exit code for bash.\n except Exception:\n logging.exception('Internal error.')\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.error(\"Please report this bug and attach the output \"\n \"to the bug report\")\n else:\n logging.error(\"Please run this command again and turn on \"\n \"verbose mode (add '-vvvv' as argument).\")\n return 64 # Some non used exit code for internal errors.\n finally:\n logging.shutdown()\n\n return wrapper", "def _handle_command(self, command: Command) -> None:\n if isinstance(command.result, LoadLabwareResult):\n # If the labware load refers to an offset, that offset must actually exist.\n if command.result.offsetId is not None:\n assert command.result.offsetId in self._state.labware_offsets_by_id\n\n definition_uri = uri_from_details(\n namespace=command.result.definition.namespace,\n load_name=command.result.definition.parameters.loadName,\n version=command.result.definition.version,\n )\n\n self._state.definitions_by_uri[definition_uri] = command.result.definition\n\n self._state.labware_by_id[\n command.result.labwareId\n ] = LoadedLabware.construct(\n id=command.result.labwareId,\n location=command.params.location,\n loadName=command.result.definition.parameters.loadName,\n definitionUri=definition_uri,\n offsetId=command.result.offsetId,\n displayName=command.params.displayName,\n )\n\n elif isinstance(command.result, MoveLabwareResult):\n labware_id = command.params.labwareId\n new_location = command.params.newLocation\n new_offset_id = command.result.offsetId\n\n self._state.labware_by_id[labware_id].offsetId = new_offset_id\n self._state.labware_by_id[labware_id].location = new_location", "def command(\n self, name_or_function=None, description=None, show_if=True, args_opts=None\n ):\n\n def decorator_register(func, name=None):\n \"\"\" Function wrapper used as decorator\n\n As we need access to self, we cannot use wrap from functools.\n \"\"\"\n self.register_command(func, name, description, show_if, args_opts)\n\n def func_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return func_wrapper\n\n if isfunction(name_or_function):\n function = name_or_function\n return decorator_register(function)\n name = name_or_function\n return partial(decorator_register, name=name)", "def command_help(self, command):\n self.commands[command].command_help()", "async def on_command_error(self, ctx: Context, e: commands.CommandError) -> None:\n if hasattr(ctx.command, \"on_error\"):\n return\n\n e = getattr(e, \"original\", e)\n\n await ctx.message.add_reaction(\"\\U0000274c\")\n\n embed = DefaultEmbed(ctx, title=\"**An error has occurred:**\")\n\n if isinstance(e, commands.DisabledCommand):\n embed.description = \"Command not currently enabled.\"\n\n elif isinstance(e, commands.UserInputError):\n embed.description = f\"Command received bad argument: {e}.\"\n\n elif isinstance(e, commands.NotOwner):\n embed.description = \"You do not have enough permissions for this command.\"\n\n elif isinstance(e, commands.CommandOnCooldown):\n embed.description = f\"{e}.\"\n\n elif isinstance(e, commands.CheckFailure):\n embed.description = \"You do not have enough permissions to run this 
command.\"\n\n elif isinstance(e, commands.MissingPermissions):\n embed.description = \"Bot does not have enough permissions for this command.\"\n\n elif isinstance(e, commands.CommandNotFound):\n embed.description = \"Unknown command.\"\n\n else:\n embed.description = f\"{type(e).__name__}: {e}\"\n\n log.error(\"An error has occurred.\", exc_info=(type(e), e, e.__traceback__))\n\n embed.description = f\"`{embed.description}`\"\n\n await ctx.send(embed=embed)", "def basecommand(func):\n # classname = inspect.getouterframes(inspect.currentframe())[1][3]\n # name = func.__name__\n # help_name = name.replace(\"do_\", \"help_\")\n doc = textwrap.dedent(func.__doc__)\n\n def new(instance, args):\n # instance.new.__doc__ = doc\n # noinspection PyUnusedLocal\n try:\n # print(\"ARGS\", args)\n argv = shlex.split(args)\n # print (\"ARGV\", argv)\n arguments = docopt(doc, help=True, argv=argv)\n func(instance, args, arguments)\n # except docopt.DocoptExit as e:\n except Exception as e: # noqa: F841\n import traceback\n import sys\n traceback.print_exc(file=sys.stdout)\n if args not in ('-h', '--help'):\n print(args)\n Console.error(\"Could not execute the command.\")\n Console.error(\"Check usage..\")\n print(doc)\n\n except SystemExit as e:\n if args not in ('-h', '--help'):\n print(args)\n Console.error(\"Could not execute the command.\")\n Console.error(\"Check usage..\")\n print(e)\n print(doc)\n\n new.__doc__ = doc\n return new", "def do_not_limit(command):\n if not isinstance(command, commands.Command):\n raise TypeError(\"one_command.not_limited is a decorator for Commands\")\n\n exempted.append(command)\n return command", "def delegator():\n import delegator\n\n def run(command):\n if \"|\" in command:\n res = delegator.chain(command)\n else:\n res = delegator.run(command)\n if res.return_code == 127:\n raise FileNotFoundError(res.err)\n if res.return_code != 0:\n raise RuntimeError(res.err)\n if res.err:\n sys.stderr.write(res.err)\n if not res.err.endswith(os.linesep):\n sys.stderr.write(os.linesep)\n\n return res.out.strip()\n\n return run", "def mock_command(mocker):\n def _mock_command(command_class):\n mocker.patch.object(command_class, 'add_arguments', return_value=None)\n mocker.patch.object(command_class, 'pre_add_arguments', return_value=None)\n instance = command_class()\n\n if hasattr(instance, 'host'):\n instance.host.name = '==MOCKED_HOST=='\n\n return instance\n\n return _mock_command", "def __call__(self, cmd):\n cmdname = cmd.name\n self.commands[cmdname] = self._prepare_cmd(cmd)\n return cmd", "def manage(command, noinput=True):\n noinput = '--noinput' if noinput else ''\n run('envdir {bundle_root}/envdir {bundle_root}/env/bin/django-admin.py '\n '{command} {noinput}'.format(bundle_root=env.bundle_root,\n command=command, noinput=noinput))", "def manage_py(self, management_command_name, **options):\n cmd = f\"python manage.py {management_command_name}\"\n for name, val in options.items():\n cmd += f\" --{name} {val}\"\n self.run(cmd)", "def manager():\n pass", "def administrator(method):\n\t@functools.wraps(method)\n\tdef wrapper(self, *args, **kwargs):\n\t\tif not self.current_user:\n\t\t\tif self.request.method == \"GET\":\n\t\t\t\tself.redirect(self.get_login_url())\n\t\t\t\treturn\n\t\t\traise tornado.web.HTTPError(403)\n\t\telif not self.current_user.administrator:\n\t\t\tif self.request.method == \"GET\":\n\t\t\t\tself.redirect(\"/\")\n\t\t\t\treturn\n\t\t\traise tornado.web.HTTPError(403)\n\t\telse:\n\t\t\treturn method(self, *args, **kwargs)\n\treturn wrapper", "def 
mainCommand(self, args):\r\n command = args.pop(0).lower() # calls exception if no arguments present\r\n if command in vars(CommandManager):\r\n vars(CommandManager)[command](self, *args) # calls exception if wrong amount of arguments\r", "def command_type(command_name):\n def decorator(cls):\n \"\"\"\"\"\"\n CMD_TYPES[command_name] = cls\n return cls\n\n return decorator", "def side_effect(command, stdout, stderr):\n if any('always-auth' in arg for arg in command):\n raise subprocess.CalledProcessError(\n returncode=1,\n cmd=command\n )\n\n return mock.DEFAULT", "async def help_command(self, ctx, *, cmd_name: str=None):\n bot_prefix = '@Randy '\n # Get command object\n cmd_obj = self.cmd(cmd_name)\n\n # Handle no command found\n if cmd_obj is None:\n return await ctx.error(f'Command {cmd_name} not found')\n em = discord.Embed(title=cmd_obj.name, description=cmd_obj.help, color=self.color)\n\n # Input aliases and parameters to embed\n if cmd_obj.aliases:\n em.add_field(name='Aliases', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.aliases]))\n if cmd_obj.clean_params:\n em.add_field(name='Parameters', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.clean_params]))\n\n # Handle group commands\n if isinstance(cmd_obj, commands.core.Group):\n em.add_field(name='Group commands',\n value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.commands]),\n inline=False)\n\n # Add usage last\n em.add_field(name='Usage',\n value=f'```{bot_prefix}\\u200b{cmd_name} '\n f'{\" \".join([f\"<{x}>\" for x in cmd_obj.clean_params])}```',\n inline=False)\n\n await ctx.send(embed=em)", "def on_cmd(self,\n command: str,\n about: Union[str, Dict[str, Union[str, List[str], Dict[str, str]]]],\n *,\n group: int = 0,\n name: str = '',\n trigger: str = Config.CMD_TRIGGER,\n filter_me: bool = True,\n allow_private: bool = True,\n allow_bots: bool = True,\n allow_groups: bool = True,\n allow_channels: bool = True,\n only_admins: bool = False,\n allow_via_bot: bool = True,\n check_client: bool = False,\n check_downpath: bool = False,\n check_change_info_perm: bool = False,\n check_edit_perm: bool = False,\n check_delete_perm: bool = False,\n check_restrict_perm: bool = False,\n check_promote_perm: bool = False,\n check_invite_perm: bool = False,\n check_pin_perm: bool = False,\n **kwargs: Union[str, bool]\n ) -> RawDecorator._PYRORETTYPE:\n return self._build_decorator(\n types.raw.Command.parse(command, about,\n trigger, name, filter_me,\n client=self,\n group=group,\n allow_private=allow_private,\n allow_bots=allow_bots,\n allow_groups=allow_groups,\n allow_channels=allow_channels,\n only_admins=only_admins,\n allow_via_bot=allow_via_bot,\n check_client=check_client,\n check_downpath=check_downpath,\n check_change_info_perm=check_change_info_perm,\n check_edit_perm=check_edit_perm,\n check_delete_perm=check_delete_perm,\n check_restrict_perm=check_restrict_perm,\n check_promote_perm=check_promote_perm,\n check_invite_perm=check_invite_perm,\n check_pin_perm=check_pin_perm), **kwargs)", "def handle(self, *args, **options):\n raise NotImplementedError()", "async def on_command_error(\n self, ctx: commands.Context, error: commands.CommandError\n ) -> None:\n if getattr(error, \"handled\", False):\n logger.debug(\n f\"Command {ctx.command} had its error already handled locally; ignoring.\"\n )\n return\n\n error = getattr(error, \"original\", error)\n\n if isinstance(error, commands.CommandNotFound):\n return # Skip logging CommandNotFound Error\n\n elif isinstance(error, commands.UserInputError):\n if isinstance(error, 
commands.MissingRequiredArgument):\n description = (\n f\"`{error.param.name}` is a required argument that is missing.\"\n \"\\n\\nUsage:\\n\"\n f\"```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n else:\n description = (\n f\"Your input was invalid: {error}\\n\\nUsage:\\n\"\n f\"```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n\n embed = self.error_embed(description)\n await ctx.send(embed=embed)\n\n elif isinstance(error, commands.CommandOnCooldown):\n mins, secs = divmod(math.ceil(error.retry_after), 60)\n embed = self.error_embed(\n f\"This command is on cooldown:\\nPlease retry in **{mins} minutes {secs} seconds**.\"\n )\n await ctx.send(embed=embed, delete_after=10)\n\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(embed=self.error_embed(\"This command has been disabled.\"))\n\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\n embed=self.error_embed(\"This command can only be used in the server.\")\n )\n\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(\n embed=self.error_embed(\"You aren't allowed to use this command.\")\n )\n\n elif isinstance(error, commands.BadArgument):\n self.revert_cooldown_counter(ctx.command, ctx.message)\n embed = self.error_embed(\n \"The argument you provided was invalid: \"\n f\"{error}\\n\\nUsage:\\n```{ctx.prefix}{ctx.command} {ctx.command.signature}```\"\n )\n await ctx.send(embed=embed)\n else:\n await self.handle_unexpected_error(ctx, error)\n return # Exit early to avoid logging.\n\n logger.debug(\n f\"Error Encountered: {type(error).__name__} - {str(error)}, \"\n f\"Command: {ctx.command}, \"\n f\"Author: {ctx.author}, \"\n f\"Channel: {ctx.channel}\"\n )", "async def cog_command_error(self, ctx: Context, error: CommandInvokeError):\n if isinstance(error.original, NoRolesError):\n await error.original.handle_error(ctx)\n else:\n await super().cog_command_error(ctx, error)", "async def logger(self, ctx):\n await util.command_group_help(ctx)", "async def clean_cmd(self, ctx):\n await self.liara.send_command_help(ctx)", "def management(self):\r\n return management.Management(self)", "async def on_command_error(ctx: commands.Context, error: commands.CommandError):\n if isinstance(error, commands.CommandNotFound):\n message = f\"This command is not listed in {bot.user} dictionary. Please try again.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n # return # Return because we don't want to show an error for every command not found\n elif isinstance(error, commands.CommandOnCooldown):\n message = f\"This command is on cooldown. Please try again after {round(error.retry_after, 1)} seconds.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.MissingPermissions):\n message = \"You are missing the required permissions to run this command!\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.NoPrivateMessage):\n message = \"Private messages only. How cute.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.MissingRequiredArgument):\n message = \"Command is missing an argument. 
Try again.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, commands.CheckFailure):\n message = \"You do not have the permissions to do this.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n elif isinstance(error, (commands.MissingRole, commands.MissingAnyRole)):\n message = \"You don't have any role to run this command.\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)\n else:\n message = \"Oh no! Something went wrong while running the command!\"\n embed = discord.Embed(title=f\"Woah, woah!\", description=message, colour=0xd80000)\n await ctx.send(embed=embed, delete_after=5)", "def runCommand(self): \\\n # pylint: disable=no-self-use", "async def manage(self, msg, *args):\n if len(args) < 3:\n return\n sub_command = args[0]\n if sub_command not in ['add', 'remove']:\n return\n var = args[1]\n if var not in ['BANNED_USERS', 'TRUSTED_ROLES', 'SUDO_IDS', 'SUDO_CHANNELS']:\n return\n if var == 'BANNED_USERS':\n var = Guard.BANNED_USERS\n elif var == 'TRUSTED_ROLES':\n var = Guard.TRUSTED_ROLES\n elif var == 'SUDO_IDS':\n var = Guard.SUDO_IDS\n elif var == 'SUDO_CHANNELS':\n var = Guard.SUDO_CHANNELS\n else:\n return\n entityids = args[2:]\n for entityid in entityids:\n if sub_command == 'add':\n var.add(int(entityid))\n elif sub_command == 'remove':\n var.remove(int(entityid))\n await msg.add_reaction('🆗')\n if self.author_dm is None:\n self.author_dm = await client.fetch_channel(Guard.AUTHOR_DM)\n await self.author_dm.send(**{\n 'content': f'{msg.author} ({msg.author.id}): {args}',\n })", "def handle_command(command, event, bot):\n print('slack::cmd::{}'.format(command))\n\n cmd_list = command.split(' ')\n cmd = cmd_list[0].lower()\n args = cmd_list[1:] if len(cmd_list) else 0\n\n if cmd == 'help':\n response, success = handle_command_help()\n\n elif cmd == 'accounts':\n response, success = handle_command_accounts(args, event, bot)\n\n elif cmd == 'assets':\n response, success = handle_command_assets(args, event, bot)\n\n elif cmd == 'publish':\n response, success = handle_command_publish(args, event, bot)\n\n elif cmd == 'self':\n response, success = handle_command_self(args, event, bot)\n\n elif 'reaction_' in cmd:\n response, success = handle_command_reaction(args, event, bot)\n else:\n response, success = handle_command_help()\n\n print('slack::cmd::{}::success::{}'.format(command, success))\n return success, response", "def wrapper(self, *args, **kwargs):\n self.positive = func.__name__[5:] in self.perms\n func.__dict__['role'] = kwargs_glob.get('role')\n\n if self.previous_login != login_as:\n login_as_user(login_as, self.filter_)\n self.previous_login = login_as\n\n try:\n func(self, *args, **kwargs)\n except EntityNotFound as err:\n if self.positive:\n raise err\n else:\n logger.warning(err)\n except AttributeError as err:\n if self.positive:\n raise err\n else:\n logger.warning(err)\n # Case failed, clean even if not positive.\n except AssertionError as err:\n self.positive = True\n raise err\n finally:\n if self.positive and cleanup_func is not None:\n self.cleanup_functions.append(\n {\n 'func': cleanup_func,\n 'params': kwargs_glob,\n }\n )" ]
[ "0.5904763", "0.578845", "0.5735835", "0.56567407", "0.56270766", "0.5600731", "0.5581101", "0.5554357", "0.5539104", "0.55316645", "0.5529846", "0.55071104", "0.5505431", "0.5505338", "0.5483254", "0.54173476", "0.54166645", "0.53439975", "0.53280574", "0.5321739", "0.52679396", "0.52521676", "0.52418435", "0.5196691", "0.5196499", "0.517249", "0.5151756", "0.51446766", "0.5133573", "0.5130136", "0.51095736", "0.5108292", "0.50889945", "0.50457233", "0.502257", "0.5006005", "0.5003528", "0.4994678", "0.4981738", "0.49704355", "0.49556625", "0.4955625", "0.49486312", "0.4944799", "0.4937824", "0.49309123", "0.4929016", "0.49208415", "0.49166185", "0.49126762", "0.49098343", "0.49036166", "0.49010766", "0.48547214", "0.48502898", "0.48492953", "0.4843816", "0.4840775", "0.4840002", "0.48371917", "0.4816743", "0.4812189", "0.4807016", "0.47994977", "0.47940865", "0.47924155", "0.47895294", "0.4782585", "0.47817686", "0.47811353", "0.47752526", "0.47733516", "0.47684282", "0.47546676", "0.47521013", "0.47408846", "0.47397533", "0.47370076", "0.4731509", "0.47303647", "0.47295526", "0.47258702", "0.4724921", "0.47215357", "0.4720211", "0.47198588", "0.4719248", "0.4719134", "0.47176906", "0.47161052", "0.47076616", "0.4705558", "0.47018555", "0.46996894", "0.46971938", "0.4697047", "0.4695308", "0.46938664", "0.46926636", "0.4689692" ]
0.62160754
0